1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
29 #include "iavf_rxtx.h"
32 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
34 /* The following constraints must be satisfied:
35 * thresh < rxq->nb_rx_desc
37 if (thresh >= nb_desc) {
38 		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than the number of Rx descriptors (%u)",
46 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
47 uint16_t tx_free_thresh)
49 /* TX descriptors will have their RS bit set after tx_rs_thresh
50 * descriptors have been used. The TX descriptor ring will be cleaned
51 * after tx_free_thresh descriptors are used or if the number of
52 * descriptors required to transmit a packet is greater than the
53 * number of free TX descriptors.
55 * The following constraints must be satisfied:
56 * - tx_rs_thresh must be less than the size of the ring minus 2.
57 * - tx_free_thresh must be less than the size of the ring minus 3.
58 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
59 * - tx_rs_thresh must be a divisor of the ring size.
61 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
62 * race condition, hence the maximum threshold constraints. When set
63 * to zero use default values.
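 * For example, with nb_desc = 512 and tx_rs_thresh = tx_free_thresh = 32,
 * every constraint holds: 32 < 510, 32 < 509, 32 <= 32 and 512 % 32 == 0.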
65 if (tx_rs_thresh >= (nb_desc - 2)) {
66 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
67 "number of TX descriptors (%u) minus 2",
68 tx_rs_thresh, nb_desc);
71 if (tx_free_thresh >= (nb_desc - 3)) {
72 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
73 "number of TX descriptors (%u) minus 3.",
74 tx_free_thresh, nb_desc);
77 if (tx_rs_thresh > tx_free_thresh) {
78 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
79 "equal to tx_free_thresh (%u).",
80 tx_rs_thresh, tx_free_thresh);
83 if ((nb_desc % tx_rs_thresh) != 0) {
84 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
85 "number of TX descriptors (%u).",
86 tx_rs_thresh, nb_desc);
94 check_rx_vec_allow(struct iavf_rx_queue *rxq)
96 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
97 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
98 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
102 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
107 check_tx_vec_allow(struct iavf_tx_queue *txq)
109 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
110 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
111 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
112 PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
115 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
120 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
124 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
125 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
126 "rxq->rx_free_thresh=%d, "
127 "IAVF_RX_MAX_BURST=%d",
128 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
130 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
131 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
132 "rxq->nb_rx_desc=%d, "
133 "rxq->rx_free_thresh=%d",
134 rxq->nb_rx_desc, rxq->rx_free_thresh);
141 reset_rx_queue(struct iavf_rx_queue *rxq)
149 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
151 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
152 ((volatile char *)rxq->rx_ring)[i] = 0;
154 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
156 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
157 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
160 rxq->rx_nb_avail = 0;
161 rxq->rx_next_avail = 0;
162 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
166 rxq->pkt_first_seg = NULL;
167 rxq->pkt_last_seg = NULL;
171 reset_tx_queue(struct iavf_tx_queue *txq)
173 struct iavf_tx_entry *txe;
178 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
183 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
184 for (i = 0; i < size; i++)
185 ((volatile char *)txq->tx_ring)[i] = 0;
187 prev = (uint16_t)(txq->nb_tx_desc - 1);
188 for (i = 0; i < txq->nb_tx_desc; i++) {
189 txq->tx_ring[i].cmd_type_offset_bsz =
190 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
193 txe[prev].next_id = i;
200 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
201 txq->nb_free = txq->nb_tx_desc - 1;
203 txq->next_dd = txq->rs_thresh - 1;
204 txq->next_rs = txq->rs_thresh - 1;
208 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
210 volatile union iavf_rx_desc *rxd;
211 struct rte_mbuf *mbuf = NULL;
215 for (i = 0; i < rxq->nb_rx_desc; i++) {
216 mbuf = rte_mbuf_raw_alloc(rxq->mp);
217 if (unlikely(!mbuf)) {
218 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
222 rte_mbuf_refcnt_set(mbuf, 1);
224 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
226 mbuf->port = rxq->port_id;
229 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
231 rxd = &rxq->rx_ring[i];
232 rxd->read.pkt_addr = dma_addr;
233 rxd->read.hdr_addr = 0;
234 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
239 rxq->sw_ring[i] = mbuf;
246 release_rxq_mbufs(struct iavf_rx_queue *rxq)
253 for (i = 0; i < rxq->nb_rx_desc; i++) {
254 if (rxq->sw_ring[i]) {
255 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
256 rxq->sw_ring[i] = NULL;
261 if (rxq->rx_nb_avail == 0)
263 for (i = 0; i < rxq->rx_nb_avail; i++) {
264 struct rte_mbuf *mbuf;
266 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
267 rte_pktmbuf_free_seg(mbuf);
269 rxq->rx_nb_avail = 0;
273 release_txq_mbufs(struct iavf_tx_queue *txq)
277 if (!txq || !txq->sw_ring) {
278 		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
282 for (i = 0; i < txq->nb_tx_desc; i++) {
283 if (txq->sw_ring[i].mbuf) {
284 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
285 txq->sw_ring[i].mbuf = NULL;
290 static const struct iavf_rxq_ops def_rxq_ops = {
291 .release_mbufs = release_rxq_mbufs,
294 static const struct iavf_txq_ops def_txq_ops = {
295 .release_mbufs = release_txq_mbufs,
299 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
300 uint16_t nb_desc, unsigned int socket_id,
301 const struct rte_eth_rxconf *rx_conf,
302 struct rte_mempool *mp)
304 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
305 struct iavf_adapter *ad =
306 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
307 struct iavf_info *vf =
308 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
309 struct iavf_vsi *vsi = &vf->vsi;
310 struct iavf_rx_queue *rxq;
311 const struct rte_memzone *mz;
314 uint16_t rx_free_thresh;
316 PMD_INIT_FUNC_TRACE();
318 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
319 nb_desc > IAVF_MAX_RING_DESC ||
320 nb_desc < IAVF_MIN_RING_DESC) {
321 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
326 /* Check free threshold */
327 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
328 IAVF_DEFAULT_RX_FREE_THRESH :
329 rx_conf->rx_free_thresh;
330 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
333 /* Free memory if needed */
334 if (dev->data->rx_queues[queue_idx]) {
335 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
336 dev->data->rx_queues[queue_idx] = NULL;
339 /* Allocate the rx queue data structure */
340 rxq = rte_zmalloc_socket("iavf rxq",
341 sizeof(struct iavf_rx_queue),
345 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
346 "rx queue data structure");
350 if (vf->vf_res->vf_cap_flags &
351 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
352 vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
353 rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
355 rxq->rxdid = IAVF_RXDID_LEGACY_1;
359 rxq->nb_rx_desc = nb_desc;
360 rxq->rx_free_thresh = rx_free_thresh;
361 rxq->queue_id = queue_idx;
362 rxq->port_id = dev->data->port_id;
363 rxq->crc_len = 0; /* crc stripping by default */
364 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
368 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
369 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
371 /* Allocate the software ring. */
372 len = nb_desc + IAVF_RX_MAX_BURST;
374 rte_zmalloc_socket("iavf rx sw ring",
375 sizeof(struct rte_mbuf *) * len,
379 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
384 	/* Allocate the maximum number of RX ring hardware descriptors, plus
385 	 * a little extra to support bulk allocation.
387 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
388 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
390 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
391 ring_size, IAVF_RING_BASE_ALIGN,
394 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
395 rte_free(rxq->sw_ring);
399 /* Zero all the descriptors in the ring. */
400 memset(mz->addr, 0, ring_size);
401 rxq->rx_ring_phys_addr = mz->iova;
402 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
407 dev->data->rx_queues[queue_idx] = rxq;
408 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
409 rxq->ops = &def_rxq_ops;
411 if (check_rx_bulk_allow(rxq) == true) {
412 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
413 "satisfied. Rx Burst Bulk Alloc function will be "
414 "used on port=%d, queue=%d.",
415 rxq->port_id, rxq->queue_id);
417 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
418 "not satisfied, Scattered Rx is requested "
419 "on port=%d, queue=%d.",
420 rxq->port_id, rxq->queue_id);
421 ad->rx_bulk_alloc_allowed = false;
424 if (check_rx_vec_allow(rxq) == false)
425 ad->rx_vec_allowed = false;
431 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
434 unsigned int socket_id,
435 const struct rte_eth_txconf *tx_conf)
437 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
438 struct iavf_tx_queue *txq;
439 const struct rte_memzone *mz;
441 uint16_t tx_rs_thresh, tx_free_thresh;
444 PMD_INIT_FUNC_TRACE();
446 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
448 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
449 nb_desc > IAVF_MAX_RING_DESC ||
450 nb_desc < IAVF_MIN_RING_DESC) {
451 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
456 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
457 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
458 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
459 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
460 	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
462 /* Free memory if needed. */
463 if (dev->data->tx_queues[queue_idx]) {
464 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
465 dev->data->tx_queues[queue_idx] = NULL;
468 /* Allocate the TX queue data structure. */
469 txq = rte_zmalloc_socket("iavf txq",
470 sizeof(struct iavf_tx_queue),
474 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
475 "tx queue structure");
479 txq->nb_tx_desc = nb_desc;
480 txq->rs_thresh = tx_rs_thresh;
481 txq->free_thresh = tx_free_thresh;
482 txq->queue_id = queue_idx;
483 txq->port_id = dev->data->port_id;
484 txq->offloads = offloads;
485 txq->tx_deferred_start = tx_conf->tx_deferred_start;
487 /* Allocate software ring */
489 rte_zmalloc_socket("iavf tx sw ring",
490 sizeof(struct iavf_tx_entry) * nb_desc,
494 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
499 /* Allocate TX hardware ring descriptors. */
500 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
501 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
502 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
503 ring_size, IAVF_RING_BASE_ALIGN,
506 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
507 rte_free(txq->sw_ring);
511 txq->tx_ring_phys_addr = mz->iova;
512 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
517 dev->data->tx_queues[queue_idx] = txq;
518 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
519 txq->ops = &def_txq_ops;
521 if (check_tx_vec_allow(txq) == false) {
522 struct iavf_adapter *ad =
523 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
524 ad->tx_vec_allowed = false;
531 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
533 struct iavf_adapter *adapter =
534 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
535 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
536 struct iavf_rx_queue *rxq;
539 PMD_DRV_FUNC_TRACE();
541 if (rx_queue_id >= dev->data->nb_rx_queues)
544 rxq = dev->data->rx_queues[rx_queue_id];
546 err = alloc_rxq_mbufs(rxq);
548 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
554 /* Init the RX tail register. */
555 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
556 IAVF_WRITE_FLUSH(hw);
558 /* Ready to switch the queue on */
559 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
561 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
564 dev->data->rx_queue_state[rx_queue_id] =
565 RTE_ETH_QUEUE_STATE_STARTED;
571 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
573 struct iavf_adapter *adapter =
574 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
575 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
576 struct iavf_tx_queue *txq;
579 PMD_DRV_FUNC_TRACE();
581 if (tx_queue_id >= dev->data->nb_tx_queues)
584 txq = dev->data->tx_queues[tx_queue_id];
586 	/* Init the TX tail register. */
587 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
588 IAVF_WRITE_FLUSH(hw);
590 /* Ready to switch the queue on */
591 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
594 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
597 dev->data->tx_queue_state[tx_queue_id] =
598 RTE_ETH_QUEUE_STATE_STARTED;
604 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
606 struct iavf_adapter *adapter =
607 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
608 struct iavf_rx_queue *rxq;
611 PMD_DRV_FUNC_TRACE();
613 if (rx_queue_id >= dev->data->nb_rx_queues)
616 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
618 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
623 rxq = dev->data->rx_queues[rx_queue_id];
624 rxq->ops->release_mbufs(rxq);
626 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
632 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
634 struct iavf_adapter *adapter =
635 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
636 struct iavf_tx_queue *txq;
639 PMD_DRV_FUNC_TRACE();
641 if (tx_queue_id >= dev->data->nb_tx_queues)
644 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
646 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
651 txq = dev->data->tx_queues[tx_queue_id];
652 txq->ops->release_mbufs(txq);
654 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
660 iavf_dev_rx_queue_release(void *rxq)
662 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
667 q->ops->release_mbufs(q);
668 rte_free(q->sw_ring);
669 rte_memzone_free(q->mz);
674 iavf_dev_tx_queue_release(void *txq)
676 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
681 q->ops->release_mbufs(q);
682 rte_free(q->sw_ring);
683 rte_memzone_free(q->mz);
688 iavf_stop_queues(struct rte_eth_dev *dev)
690 struct iavf_adapter *adapter =
691 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
692 struct iavf_rx_queue *rxq;
693 struct iavf_tx_queue *txq;
696 	/* Stop all queues */
697 ret = iavf_disable_queues(adapter);
699 		PMD_DRV_LOG(WARNING, "Failed to stop queues");
701 for (i = 0; i < dev->data->nb_tx_queues; i++) {
702 txq = dev->data->tx_queues[i];
705 txq->ops->release_mbufs(txq);
707 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
709 for (i = 0; i < dev->data->nb_rx_queues; i++) {
710 rxq = dev->data->rx_queues[i];
713 rxq->ops->release_mbufs(rxq);
715 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
720 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
722 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
723 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
724 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
726 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
733 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
734 volatile union iavf_rx_flex_desc *rxdp)
736 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
737 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
738 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
740 rte_le_to_cpu_16(rxdp->wb.l2tag1);
746 /* Translate the rx descriptor status and error fields to pkt flags */
747 static inline uint64_t
748 iavf_rxd_to_pkt_flags(uint64_t qword)
751 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
753 #define IAVF_RX_ERR_BITS 0x3f
755 /* Check if RSS_HASH */
756 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
757 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
758 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
760 /* Check if FDIR Match */
761 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
764 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
765 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
769 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
770 flags |= PKT_RX_IP_CKSUM_BAD;
772 flags |= PKT_RX_IP_CKSUM_GOOD;
774 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
775 flags |= PKT_RX_L4_CKSUM_BAD;
777 flags |= PKT_RX_L4_CKSUM_GOOD;
779 /* TODO: Oversize error bit is not processed here */
784 static inline uint64_t
785 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
788 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
791 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
792 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
793 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
795 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
797 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
798 flags |= PKT_RX_FDIR_ID;
802 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
803 flags |= PKT_RX_FDIR_ID;
809 /* Translate the flex Rx descriptor fields (RSS hash, FDIR ID) into mbuf fields and offload flags */
811 iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
812 volatile union iavf_rx_flex_desc *rxdp)
814 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
815 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
816 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
819 stat_err = rte_le_to_cpu_16(desc->status_error0);
820 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
821 mb->ol_flags |= PKT_RX_RSS_HASH;
822 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
826 if (desc->flow_id != 0xFFFFFFFF) {
827 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
828 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
832 #define IAVF_RX_FLEX_ERR0_BITS \
833 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
834 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
835 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
836 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
837 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
838 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
840 /* Rx L3/L4 checksum */
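/* Map the flex descriptor status/error bits to PKT_RX_*_CKSUM_GOOD/BAD
 * flags. If the L3L4P bit is not set, the hardware did not parse the
 * headers and no checksum information is reported at all.
 */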
841 static inline uint64_t
842 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
846 	/* Check whether HW has parsed the packet and computed the checksums */
847 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
850 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
851 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
855 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
856 flags |= PKT_RX_IP_CKSUM_BAD;
858 flags |= PKT_RX_IP_CKSUM_GOOD;
860 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
861 flags |= PKT_RX_L4_CKSUM_BAD;
863 flags |= PKT_RX_L4_CKSUM_GOOD;
865 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
866 flags |= PKT_RX_EIP_CKSUM_BAD;
871 /* If the number of free RX descriptors is greater than the RX free
872 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
873 * register. Update the RDT with the value of the last processed RX
874 * descriptor minus 1, to guarantee that the RDT register is never
875 * equal to the RDH register, which creates a "full" ring situation
876 * from the hardware point of view.
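 * For example, with rx_free_thresh = 32 the tail register is written only
 * about once every 32 received packets, batching the MMIO access instead
 * of paying for it on every packet.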
879 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
881 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
883 if (nb_hold > rxq->rx_free_thresh) {
885 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
886 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
887 rx_id = (uint16_t)((rx_id == 0) ?
888 (rxq->nb_rx_desc - 1) : (rx_id - 1));
889 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
892 rxq->nb_rx_hold = nb_hold;
895 /* implement recv_pkts */
897 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
899 volatile union iavf_rx_desc *rx_ring;
900 volatile union iavf_rx_desc *rxdp;
901 struct iavf_rx_queue *rxq;
902 union iavf_rx_desc rxd;
903 struct rte_mbuf *rxe;
904 struct rte_eth_dev *dev;
905 struct rte_mbuf *rxm;
906 struct rte_mbuf *nmb;
910 uint16_t rx_packet_len;
911 uint16_t rx_id, nb_hold;
914 const uint32_t *ptype_tbl;
919 rx_id = rxq->rx_tail;
920 rx_ring = rxq->rx_ring;
921 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
923 while (nb_rx < nb_pkts) {
924 rxdp = &rx_ring[rx_id];
925 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
926 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
927 IAVF_RXD_QW1_STATUS_SHIFT;
929 /* Check the DD bit first */
930 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
932 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
934 nmb = rte_mbuf_raw_alloc(rxq->mp);
935 if (unlikely(!nmb)) {
936 dev = &rte_eth_devices[rxq->port_id];
937 dev->data->rx_mbuf_alloc_failed++;
938 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
939 "queue_id=%u", rxq->port_id, rxq->queue_id);
945 rxe = rxq->sw_ring[rx_id];
947 if (unlikely(rx_id == rxq->nb_rx_desc))
950 /* Prefetch next mbuf */
951 rte_prefetch0(rxq->sw_ring[rx_id]);
953 		/* When the next RX descriptor is on a cache line boundary,
954 * prefetch the next 4 RX descriptors and next 8 pointers
957 if ((rx_id & 0x3) == 0) {
958 rte_prefetch0(&rx_ring[rx_id]);
959 rte_prefetch0(rxq->sw_ring[rx_id]);
963 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
964 rxdp->read.hdr_addr = 0;
965 rxdp->read.pkt_addr = dma_addr;
967 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
968 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
970 rxm->data_off = RTE_PKTMBUF_HEADROOM;
971 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
974 rxm->pkt_len = rx_packet_len;
975 rxm->data_len = rx_packet_len;
976 rxm->port = rxq->port_id;
978 iavf_rxd_to_vlan_tci(rxm, &rxd);
979 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
981 ptype_tbl[(uint8_t)((qword1 &
982 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
984 if (pkt_flags & PKT_RX_RSS_HASH)
986 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
988 if (pkt_flags & PKT_RX_FDIR)
989 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
991 rxm->ol_flags |= pkt_flags;
993 rx_pkts[nb_rx++] = rxm;
995 rxq->rx_tail = rx_id;
997 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1002 /* implement recv_pkts for flexible Rx descriptor */
1004 iavf_recv_pkts_flex_rxd(void *rx_queue,
1005 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1007 volatile union iavf_rx_desc *rx_ring;
1008 volatile union iavf_rx_flex_desc *rxdp;
1009 struct iavf_rx_queue *rxq;
1010 union iavf_rx_flex_desc rxd;
1011 struct rte_mbuf *rxe;
1012 struct rte_eth_dev *dev;
1013 struct rte_mbuf *rxm;
1014 struct rte_mbuf *nmb;
1016 uint16_t rx_stat_err0;
1017 uint16_t rx_packet_len;
1018 uint16_t rx_id, nb_hold;
1021 const uint32_t *ptype_tbl;
1026 rx_id = rxq->rx_tail;
1027 rx_ring = rxq->rx_ring;
1028 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1030 while (nb_rx < nb_pkts) {
1031 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1032 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1034 /* Check the DD bit first */
1035 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1037 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1039 nmb = rte_mbuf_raw_alloc(rxq->mp);
1040 if (unlikely(!nmb)) {
1041 dev = &rte_eth_devices[rxq->port_id];
1042 dev->data->rx_mbuf_alloc_failed++;
1043 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1044 "queue_id=%u", rxq->port_id, rxq->queue_id);
1050 rxe = rxq->sw_ring[rx_id];
1052 if (unlikely(rx_id == rxq->nb_rx_desc))
1055 /* Prefetch next mbuf */
1056 rte_prefetch0(rxq->sw_ring[rx_id]);
1058 		/* When the next RX descriptor is on a cache line boundary,
1059 * prefetch the next 4 RX descriptors and next 8 pointers
1062 if ((rx_id & 0x3) == 0) {
1063 rte_prefetch0(&rx_ring[rx_id]);
1064 rte_prefetch0(rxq->sw_ring[rx_id]);
1068 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1069 rxdp->read.hdr_addr = 0;
1070 rxdp->read.pkt_addr = dma_addr;
1072 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1073 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1075 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1076 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1079 rxm->pkt_len = rx_packet_len;
1080 rxm->data_len = rx_packet_len;
1081 rxm->port = rxq->port_id;
1083 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1084 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1085 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1086 iavf_rxd_to_pkt_fields(rxm, &rxd);
1087 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1088 rxm->ol_flags |= pkt_flags;
1090 rx_pkts[nb_rx++] = rxm;
1092 rxq->rx_tail = rx_id;
1094 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1099 /* implement recv_scattered_pkts for flexible Rx descriptor */
1101 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1104 struct iavf_rx_queue *rxq = rx_queue;
1105 union iavf_rx_flex_desc rxd;
1106 struct rte_mbuf *rxe;
1107 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1108 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1109 struct rte_mbuf *nmb, *rxm;
1110 uint16_t rx_id = rxq->rx_tail;
1111 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1112 struct rte_eth_dev *dev;
1113 uint16_t rx_stat_err0;
1117 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1118 volatile union iavf_rx_flex_desc *rxdp;
1119 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1121 while (nb_rx < nb_pkts) {
1122 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1123 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1125 /* Check the DD bit */
1126 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1128 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1130 nmb = rte_mbuf_raw_alloc(rxq->mp);
1131 if (unlikely(!nmb)) {
1132 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1133 "queue_id=%u", rxq->port_id, rxq->queue_id);
1134 dev = &rte_eth_devices[rxq->port_id];
1135 dev->data->rx_mbuf_alloc_failed++;
1141 rxe = rxq->sw_ring[rx_id];
1143 if (rx_id == rxq->nb_rx_desc)
1146 /* Prefetch next mbuf */
1147 rte_prefetch0(rxq->sw_ring[rx_id]);
1149 		/* When the next RX descriptor is on a cache line boundary,
1150 * prefetch the next 4 RX descriptors and next 8 pointers
1153 if ((rx_id & 0x3) == 0) {
1154 rte_prefetch0(&rx_ring[rx_id]);
1155 rte_prefetch0(rxq->sw_ring[rx_id]);
1160 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1162 /* Set data buffer address and data length of the mbuf */
1163 rxdp->read.hdr_addr = 0;
1164 rxdp->read.pkt_addr = dma_addr;
1165 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1166 IAVF_RX_FLX_DESC_PKT_LEN_M;
1167 rxm->data_len = rx_packet_len;
1168 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1170 /* If this is the first buffer of the received packet, set the
1171 * pointer to the first mbuf of the packet and initialize its
1172 * context. Otherwise, update the total length and the number
1173 * of segments of the current scattered packet, and update the
1174 * pointer to the last mbuf of the current packet.
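		 * A packet spanning N mbufs thus consumes N descriptors, and
		 * only the last one carries the EOF status bit.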
1178 first_seg->nb_segs = 1;
1179 first_seg->pkt_len = rx_packet_len;
1181 first_seg->pkt_len =
1182 (uint16_t)(first_seg->pkt_len +
1184 first_seg->nb_segs++;
1185 last_seg->next = rxm;
1188 /* If this is not the last buffer of the received packet,
1189 * update the pointer to the last mbuf of the current scattered
1190 * packet and continue to parse the RX ring.
1192 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1197 /* This is the last buffer of the received packet. If the CRC
1198 * is not stripped by the hardware:
1199 * - Subtract the CRC length from the total packet length.
1200 * - If the last buffer only contains the whole CRC or a part
1201 * of it, free the mbuf associated to the last buffer. If part
1202 * of the CRC is also contained in the previous mbuf, subtract
1203 * the length of that CRC part from the data length of the
1207 if (unlikely(rxq->crc_len > 0)) {
1208 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1209 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1210 rte_pktmbuf_free_seg(rxm);
1211 first_seg->nb_segs--;
1212 last_seg->data_len =
1213 (uint16_t)(last_seg->data_len -
1214 (RTE_ETHER_CRC_LEN - rx_packet_len));
1215 last_seg->next = NULL;
1217 rxm->data_len = (uint16_t)(rx_packet_len -
1222 first_seg->port = rxq->port_id;
1223 first_seg->ol_flags = 0;
1224 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1225 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1226 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1227 iavf_rxd_to_pkt_fields(first_seg, &rxd);
1228 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1230 first_seg->ol_flags |= pkt_flags;
1232 /* Prefetch data of first segment, if configured to do so. */
1233 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1234 first_seg->data_off));
1235 rx_pkts[nb_rx++] = first_seg;
1239 /* Record index of the next RX descriptor to probe. */
1240 rxq->rx_tail = rx_id;
1241 rxq->pkt_first_seg = first_seg;
1242 rxq->pkt_last_seg = last_seg;
1244 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1249 /* implement recv_scattered_pkts */
1251 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1254 struct iavf_rx_queue *rxq = rx_queue;
1255 union iavf_rx_desc rxd;
1256 struct rte_mbuf *rxe;
1257 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1258 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1259 struct rte_mbuf *nmb, *rxm;
1260 uint16_t rx_id = rxq->rx_tail;
1261 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1262 struct rte_eth_dev *dev;
1268 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1269 volatile union iavf_rx_desc *rxdp;
1270 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1272 while (nb_rx < nb_pkts) {
1273 rxdp = &rx_ring[rx_id];
1274 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1275 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1276 IAVF_RXD_QW1_STATUS_SHIFT;
1278 /* Check the DD bit */
1279 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1281 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1283 nmb = rte_mbuf_raw_alloc(rxq->mp);
1284 if (unlikely(!nmb)) {
1285 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1286 "queue_id=%u", rxq->port_id, rxq->queue_id);
1287 dev = &rte_eth_devices[rxq->port_id];
1288 dev->data->rx_mbuf_alloc_failed++;
1294 rxe = rxq->sw_ring[rx_id];
1296 if (rx_id == rxq->nb_rx_desc)
1299 /* Prefetch next mbuf */
1300 rte_prefetch0(rxq->sw_ring[rx_id]);
1302 		/* When the next RX descriptor is on a cache line boundary,
1303 * prefetch the next 4 RX descriptors and next 8 pointers
1306 if ((rx_id & 0x3) == 0) {
1307 rte_prefetch0(&rx_ring[rx_id]);
1308 rte_prefetch0(rxq->sw_ring[rx_id]);
1313 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1315 /* Set data buffer address and data length of the mbuf */
1316 rxdp->read.hdr_addr = 0;
1317 rxdp->read.pkt_addr = dma_addr;
1318 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1319 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1320 rxm->data_len = rx_packet_len;
1321 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1323 /* If this is the first buffer of the received packet, set the
1324 * pointer to the first mbuf of the packet and initialize its
1325 * context. Otherwise, update the total length and the number
1326 * of segments of the current scattered packet, and update the
1327 * pointer to the last mbuf of the current packet.
1331 first_seg->nb_segs = 1;
1332 first_seg->pkt_len = rx_packet_len;
1334 first_seg->pkt_len =
1335 (uint16_t)(first_seg->pkt_len +
1337 first_seg->nb_segs++;
1338 last_seg->next = rxm;
1341 /* If this is not the last buffer of the received packet,
1342 * update the pointer to the last mbuf of the current scattered
1343 * packet and continue to parse the RX ring.
1345 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1350 /* This is the last buffer of the received packet. If the CRC
1351 * is not stripped by the hardware:
1352 * - Subtract the CRC length from the total packet length.
1353 * - If the last buffer only contains the whole CRC or a part
1354 * of it, free the mbuf associated to the last buffer. If part
1355 * of the CRC is also contained in the previous mbuf, subtract
1356 * the length of that CRC part from the data length of the
1360 if (unlikely(rxq->crc_len > 0)) {
1361 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1362 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1363 rte_pktmbuf_free_seg(rxm);
1364 first_seg->nb_segs--;
1365 last_seg->data_len =
1366 (uint16_t)(last_seg->data_len -
1367 (RTE_ETHER_CRC_LEN - rx_packet_len));
1368 last_seg->next = NULL;
1370 rxm->data_len = (uint16_t)(rx_packet_len -
1374 first_seg->port = rxq->port_id;
1375 first_seg->ol_flags = 0;
1376 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1377 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1378 first_seg->packet_type =
1379 ptype_tbl[(uint8_t)((qword1 &
1380 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1382 if (pkt_flags & PKT_RX_RSS_HASH)
1383 first_seg->hash.rss =
1384 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1386 if (pkt_flags & PKT_RX_FDIR)
1387 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1389 first_seg->ol_flags |= pkt_flags;
1391 /* Prefetch data of first segment, if configured to do so. */
1392 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1393 first_seg->data_off));
1394 rx_pkts[nb_rx++] = first_seg;
1398 /* Record index of the next RX descriptor to probe. */
1399 rxq->rx_tail = rx_id;
1400 rxq->pkt_first_seg = first_seg;
1401 rxq->pkt_last_seg = last_seg;
1403 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1408 #define IAVF_LOOK_AHEAD 8
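/* Bulk-receive helper: scan the ring IAVF_LOOK_AHEAD descriptors at a
 * time, stage the mbufs of completed descriptors in rxq->rx_stage and
 * stop at the first group that is not fully done. The staged mbufs are
 * later handed out by iavf_rx_fill_from_stage().
 */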
1410 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1412 volatile union iavf_rx_flex_desc *rxdp;
1413 struct rte_mbuf **rxep;
1414 struct rte_mbuf *mb;
1417 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1418 int32_t i, j, nb_rx = 0;
1420 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1422 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1423 rxep = &rxq->sw_ring[rxq->rx_tail];
1425 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1427 /* Make sure there is at least 1 packet to receive */
1428 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1431 /* Scan LOOK_AHEAD descriptors at a time to determine which
1432 * descriptors reference packets that are ready to be received.
1434 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1435 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1436 /* Read desc statuses backwards to avoid race condition */
1437 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1438 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1442 /* Compute how many status bits were set */
1443 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1444 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1448 /* Translate descriptor info to mbuf parameters */
1449 for (j = 0; j < nb_dd; j++) {
1450 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1452 i * IAVF_LOOK_AHEAD + j);
1455 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1456 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1457 mb->data_len = pkt_len;
1458 mb->pkt_len = pkt_len;
1461 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1462 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1463 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1464 iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
1465 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1466 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1468 mb->ol_flags |= pkt_flags;
1471 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1472 rxq->rx_stage[i + j] = rxep[j];
1474 if (nb_dd != IAVF_LOOK_AHEAD)
1478 /* Clear software ring entries */
1479 for (i = 0; i < nb_rx; i++)
1480 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1486 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1488 volatile union iavf_rx_desc *rxdp;
1489 struct rte_mbuf **rxep;
1490 struct rte_mbuf *mb;
1494 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1495 int32_t i, j, nb_rx = 0;
1497 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1499 rxdp = &rxq->rx_ring[rxq->rx_tail];
1500 rxep = &rxq->sw_ring[rxq->rx_tail];
1502 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1503 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1504 IAVF_RXD_QW1_STATUS_SHIFT;
1506 /* Make sure there is at least 1 packet to receive */
1507 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1510 /* Scan LOOK_AHEAD descriptors at a time to determine which
1511 * descriptors reference packets that are ready to be received.
1513 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1514 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1515 /* Read desc statuses backwards to avoid race condition */
1516 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1517 qword1 = rte_le_to_cpu_64(
1518 rxdp[j].wb.qword1.status_error_len);
1519 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1520 IAVF_RXD_QW1_STATUS_SHIFT;
1525 /* Compute how many status bits were set */
1526 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1527 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1531 /* Translate descriptor info to mbuf parameters */
1532 for (j = 0; j < nb_dd; j++) {
1533 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1534 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1537 qword1 = rte_le_to_cpu_64
1538 (rxdp[j].wb.qword1.status_error_len);
1539 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1540 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1541 mb->data_len = pkt_len;
1542 mb->pkt_len = pkt_len;
1544 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1545 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1547 ptype_tbl[(uint8_t)((qword1 &
1548 IAVF_RXD_QW1_PTYPE_MASK) >>
1549 IAVF_RXD_QW1_PTYPE_SHIFT)];
1551 if (pkt_flags & PKT_RX_RSS_HASH)
1552 mb->hash.rss = rte_le_to_cpu_32(
1553 rxdp[j].wb.qword0.hi_dword.rss);
1555 if (pkt_flags & PKT_RX_FDIR)
1556 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1558 mb->ol_flags |= pkt_flags;
1561 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1562 rxq->rx_stage[i + j] = rxep[j];
1564 if (nb_dd != IAVF_LOOK_AHEAD)
1568 /* Clear software ring entries */
1569 for (i = 0; i < nb_rx; i++)
1570 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1575 static inline uint16_t
1576 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1577 struct rte_mbuf **rx_pkts,
1581 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1583 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1585 for (i = 0; i < nb_pkts; i++)
1586 rx_pkts[i] = stage[i];
1588 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1589 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1595 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1597 volatile union iavf_rx_desc *rxdp;
1598 struct rte_mbuf **rxep;
1599 struct rte_mbuf *mb;
1600 uint16_t alloc_idx, i;
1604 /* Allocate buffers in bulk */
1605 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1606 (rxq->rx_free_thresh - 1));
1607 rxep = &rxq->sw_ring[alloc_idx];
1608 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1609 rxq->rx_free_thresh);
1610 if (unlikely(diag != 0)) {
1611 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1615 rxdp = &rxq->rx_ring[alloc_idx];
1616 for (i = 0; i < rxq->rx_free_thresh; i++) {
1617 if (likely(i < (rxq->rx_free_thresh - 1)))
1618 /* Prefetch next mbuf */
1619 rte_prefetch0(rxep[i + 1]);
1622 rte_mbuf_refcnt_set(mb, 1);
1624 mb->data_off = RTE_PKTMBUF_HEADROOM;
1626 mb->port = rxq->port_id;
1627 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1628 rxdp[i].read.hdr_addr = 0;
1629 rxdp[i].read.pkt_addr = dma_addr;
1632 /* Update rx tail register */
1634 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1636 rxq->rx_free_trigger =
1637 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1638 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1639 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1644 static inline uint16_t
1645 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1647 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1653 if (rxq->rx_nb_avail)
1654 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1656 if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
1657 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1659 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1660 rxq->rx_next_avail = 0;
1661 rxq->rx_nb_avail = nb_rx;
1662 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1664 if (rxq->rx_tail > rxq->rx_free_trigger) {
1665 if (iavf_rx_alloc_bufs(rxq) != 0) {
1668 /* TODO: count rx_mbuf_alloc_failed here */
1670 rxq->rx_nb_avail = 0;
1671 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1672 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1673 rxq->sw_ring[j] = rxq->rx_stage[i];
1679 if (rxq->rx_tail >= rxq->nb_rx_desc)
1682 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1683 rxq->port_id, rxq->queue_id,
1684 rxq->rx_tail, nb_rx);
1686 if (rxq->rx_nb_avail)
1687 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1693 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1694 struct rte_mbuf **rx_pkts,
1697 uint16_t nb_rx = 0, n, count;
1699 if (unlikely(nb_pkts == 0))
1702 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1703 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1706 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1707 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1708 nb_rx = (uint16_t)(nb_rx + count);
1709 nb_pkts = (uint16_t)(nb_pkts - count);
1718 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1720 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1721 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1722 uint16_t nb_tx_desc = txq->nb_tx_desc;
1723 uint16_t desc_to_clean_to;
1724 uint16_t nb_tx_to_clean;
1726 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1728 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1729 if (desc_to_clean_to >= nb_tx_desc)
1730 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1732 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1733 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1734 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1735 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1736 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1737 "(port=%d queue=%d)", desc_to_clean_to,
1738 txq->port_id, txq->queue_id);
1742 if (last_desc_cleaned > desc_to_clean_to)
1743 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1746 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1749 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1751 txq->last_desc_cleaned = desc_to_clean_to;
1752 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1757 /* Check if the context descriptor is needed for TX offloading */
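/* Only TCP segmentation offload (PKT_TX_TCP_SEG) currently requires an
 * additional context descriptor; all other offloads are carried in the
 * data descriptor itself.
 */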
1758 static inline uint16_t
1759 iavf_calc_context_desc(uint64_t flags)
1761 static uint64_t mask = PKT_TX_TCP_SEG;
1763 return (flags & mask) ? 1 : 0;
1767 iavf_txd_enable_checksum(uint64_t ol_flags,
1769 uint32_t *td_offset,
1770 union iavf_tx_offload tx_offload)
1773 *td_offset |= (tx_offload.l2_len >> 1) <<
1774 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1776 /* Enable L3 checksum offloads */
1777 if (ol_flags & PKT_TX_IP_CKSUM) {
1778 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1779 *td_offset |= (tx_offload.l3_len >> 2) <<
1780 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1781 } else if (ol_flags & PKT_TX_IPV4) {
1782 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1783 *td_offset |= (tx_offload.l3_len >> 2) <<
1784 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1785 } else if (ol_flags & PKT_TX_IPV6) {
1786 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1787 *td_offset |= (tx_offload.l3_len >> 2) <<
1788 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1791 if (ol_flags & PKT_TX_TCP_SEG) {
1792 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1793 *td_offset |= (tx_offload.l4_len >> 2) <<
1794 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1798 /* Enable L4 checksum offloads */
1799 switch (ol_flags & PKT_TX_L4_MASK) {
1800 case PKT_TX_TCP_CKSUM:
1801 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1802 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
1803 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1805 case PKT_TX_SCTP_CKSUM:
1806 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
1807 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
1808 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1810 case PKT_TX_UDP_CKSUM:
1811 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
1812 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
1813 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1820 /* Set the TSO context descriptor.
1821  * Supports plain IP -> L4 as well as tunnelled IP -> IP -> L4 packets.
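 * The TSO length programmed into the descriptor is the payload size only,
 * i.e. mbuf->pkt_len minus the combined L2 + L3 + L4 header length.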
1823 static inline uint64_t
1824 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
1826 uint64_t ctx_desc = 0;
1827 uint32_t cd_cmd, hdr_len, cd_tso_len;
1829 if (!tx_offload.l4_len) {
1830 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1834 hdr_len = tx_offload.l2_len +
1838 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1839 cd_tso_len = mbuf->pkt_len - hdr_len;
1840 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1841 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1842 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1847 /* Construct the tx flags */
1848 static inline uint64_t
1849 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
1852 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
1853 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
1854 ((uint64_t)td_offset <<
1855 IAVF_TXD_QW1_OFFSET_SHIFT) |
1857 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
1858 ((uint64_t)td_tag <<
1859 IAVF_TXD_QW1_L2TAG1_SHIFT));
1864 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1866 volatile struct iavf_tx_desc *txd;
1867 volatile struct iavf_tx_desc *txr;
1868 struct iavf_tx_queue *txq;
1869 struct iavf_tx_entry *sw_ring;
1870 struct iavf_tx_entry *txe, *txn;
1871 struct rte_mbuf *tx_pkt;
1872 struct rte_mbuf *m_seg;
1883 uint64_t buf_dma_addr;
1884 union iavf_tx_offload tx_offload = {0};
1887 sw_ring = txq->sw_ring;
1889 tx_id = txq->tx_tail;
1890 txe = &sw_ring[tx_id];
1892 /* Check if the descriptor ring needs to be cleaned. */
1893 if (txq->nb_free < txq->free_thresh)
1894 iavf_xmit_cleanup(txq);
1896 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1901 tx_pkt = *tx_pkts++;
1902 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1904 ol_flags = tx_pkt->ol_flags;
1905 tx_offload.l2_len = tx_pkt->l2_len;
1906 tx_offload.l3_len = tx_pkt->l3_len;
1907 tx_offload.l4_len = tx_pkt->l4_len;
1908 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1909 /* Calculate the number of context descriptors needed. */
1910 nb_ctx = iavf_calc_context_desc(ol_flags);
1912 		/* The number of descriptors that must be allocated for a
1913 		 * packet equals the number of segments of that packet, plus
1914 		 * one context descriptor if needed.
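		 * For example, a TSO packet made up of three mbuf segments
		 * consumes four descriptors: three data plus one context.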
1916 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1917 tx_last = (uint16_t)(tx_id + nb_used - 1);
1920 if (tx_last >= txq->nb_tx_desc)
1921 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1923 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
1924 " tx_first=%u tx_last=%u",
1925 txq->port_id, txq->queue_id, tx_id, tx_last);
1927 if (nb_used > txq->nb_free) {
1928 if (iavf_xmit_cleanup(txq)) {
1933 if (unlikely(nb_used > txq->rs_thresh)) {
1934 while (nb_used > txq->nb_free) {
1935 if (iavf_xmit_cleanup(txq)) {
1944 /* Descriptor based VLAN insertion */
1945 if (ol_flags & PKT_TX_VLAN_PKT) {
1946 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
1947 td_tag = tx_pkt->vlan_tci;
1950 		/* According to the datasheet, bit 2 is reserved and must be
1955 /* Enable checksum offloading */
1956 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
1957 iavf_txd_enable_checksum(ol_flags, &td_cmd,
1958 &td_offset, tx_offload);
1961 /* Setup TX context descriptor if required */
1962 uint64_t cd_type_cmd_tso_mss =
1963 IAVF_TX_DESC_DTYPE_CONTEXT;
1964 volatile struct iavf_tx_context_desc *ctx_txd =
1965 (volatile struct iavf_tx_context_desc *)
1968 txn = &sw_ring[txe->next_id];
1969 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1971 rte_pktmbuf_free_seg(txe->mbuf);
1976 if (ol_flags & PKT_TX_TCP_SEG)
1977 cd_type_cmd_tso_mss |=
1978 iavf_set_tso_ctx(tx_pkt, tx_offload);
1980 ctx_txd->type_cmd_tso_mss =
1981 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1983 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
1984 txe->last_id = tx_last;
1985 tx_id = txe->next_id;
1992 txn = &sw_ring[txe->next_id];
1995 rte_pktmbuf_free_seg(txe->mbuf);
1998 /* Setup TX Descriptor */
1999 slen = m_seg->data_len;
2000 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2001 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2002 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2007 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2008 txe->last_id = tx_last;
2009 tx_id = txe->next_id;
2011 m_seg = m_seg->next;
2014 /* The last packet data descriptor needs End Of Packet (EOP) */
2015 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2016 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2017 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2019 if (txq->nb_used >= txq->rs_thresh) {
2020 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2021 "%4u (port=%d queue=%d)",
2022 tx_last, txq->port_id, txq->queue_id);
2024 td_cmd |= IAVF_TX_DESC_CMD_RS;
2026 /* Update txq RS bit counters */
2030 txd->cmd_type_offset_bsz |=
2031 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2032 IAVF_TXD_QW1_CMD_SHIFT);
2033 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2039 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2040 txq->port_id, txq->queue_id, tx_id, nb_tx);
2042 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2043 txq->tx_tail = tx_id;
2048 /* TX prep functions */
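/* Validate a burst of packets before transmission: check the segment
 * count, the TSO MSS range and the requested offload flags. On the first
 * failing packet, rte_errno is set and its index is returned to the
 * caller.
 */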
2050 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2057 for (i = 0; i < nb_pkts; i++) {
2059 ol_flags = m->ol_flags;
2061 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2062 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2063 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2067 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2068 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2069 			/* An MSS outside this range is considered malicious */
2074 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2075 rte_errno = ENOTSUP;
2079 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2080 ret = rte_validate_tx_offload(m);
2086 ret = rte_net_intel_cksum_prepare(m);
2096 /* Choose Rx function */
2098 iavf_set_rx_function(struct rte_eth_dev *dev)
2100 struct iavf_adapter *adapter =
2101 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2102 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2104 struct iavf_rx_queue *rxq;
2106 bool use_avx2 = false;
2108 if (!iavf_rx_vec_dev_check(dev) &&
2109 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2110 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2111 rxq = dev->data->rx_queues[i];
2112 (void)iavf_rxq_vec_setup(rxq);
2115 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2116 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2117 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2120 if (dev->data->scattered_rx) {
2122 "Using %sVector Scattered Rx (port %d).",
2123 use_avx2 ? "avx2 " : "",
2124 dev->data->port_id);
2125 if (vf->vf_res->vf_cap_flags &
2126 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2127 dev->rx_pkt_burst = use_avx2 ?
2128 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2129 iavf_recv_scattered_pkts_vec_flex_rxd;
2131 dev->rx_pkt_burst = use_avx2 ?
2132 iavf_recv_scattered_pkts_vec_avx2 :
2133 iavf_recv_scattered_pkts_vec;
2135 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2136 use_avx2 ? "avx2 " : "",
2137 dev->data->port_id);
2138 if (vf->vf_res->vf_cap_flags &
2139 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2140 dev->rx_pkt_burst = use_avx2 ?
2141 iavf_recv_pkts_vec_avx2_flex_rxd :
2142 iavf_recv_pkts_vec_flex_rxd;
2144 dev->rx_pkt_burst = use_avx2 ?
2145 iavf_recv_pkts_vec_avx2 :
2153 if (dev->data->scattered_rx) {
2154 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2155 dev->data->port_id);
2156 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2157 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2159 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2160 } else if (adapter->rx_bulk_alloc_allowed) {
2161 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2162 dev->data->port_id);
2163 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2165 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2166 dev->data->port_id);
2167 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2168 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2170 dev->rx_pkt_burst = iavf_recv_pkts;
2174 /* Choose Tx function */
2176 iavf_set_tx_function(struct rte_eth_dev *dev)
2179 struct iavf_tx_queue *txq;
2181 bool use_avx2 = false;
2183 if (!iavf_tx_vec_dev_check(dev) &&
2184 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2185 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2186 txq = dev->data->tx_queues[i];
2189 iavf_txq_vec_setup(txq);
2192 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2193 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2194 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2197 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2198 use_avx2 ? "avx2 " : "",
2199 dev->data->port_id);
2200 dev->tx_pkt_burst = use_avx2 ?
2201 iavf_xmit_pkts_vec_avx2 :
2203 dev->tx_pkt_prepare = NULL;
2209 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2210 dev->data->port_id);
2211 dev->tx_pkt_burst = iavf_xmit_pkts;
2212 dev->tx_pkt_prepare = iavf_prep_pkts;
2216 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2219 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2220 uint16_t i, tx_last, tx_id;
2221 uint16_t nb_tx_free_last;
2222 uint16_t nb_tx_to_clean;
2225 	/* Start freeing mbufs from the entry after tx_tail */
2226 tx_last = txq->tx_tail;
2227 tx_id = swr_ring[tx_last].next_id;
2229 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2232 nb_tx_to_clean = txq->nb_free;
2233 nb_tx_free_last = txq->nb_free;
2235 free_cnt = txq->nb_tx_desc;
2237 	/* Loop through swr_ring to count the number of
2238 	 * freeable mbufs and packets.
2240 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2241 for (i = 0; i < nb_tx_to_clean &&
2242 pkt_cnt < free_cnt &&
2243 tx_id != tx_last; i++) {
2244 if (swr_ring[tx_id].mbuf != NULL) {
2245 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2246 swr_ring[tx_id].mbuf = NULL;
2249 * last segment in the packet,
2250 * increment packet count
2252 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2255 tx_id = swr_ring[tx_id].next_id;
2258 if (txq->rs_thresh > txq->nb_tx_desc -
2259 txq->nb_free || tx_id == tx_last)
2262 if (pkt_cnt < free_cnt) {
2263 if (iavf_xmit_cleanup(txq))
2266 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2267 nb_tx_free_last = txq->nb_free;
2271 return (int)pkt_cnt;
2275 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2277 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2279 return iavf_tx_done_cleanup_full(q, free_cnt);
2283 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2284 struct rte_eth_rxq_info *qinfo)
2286 struct iavf_rx_queue *rxq;
2288 rxq = dev->data->rx_queues[queue_id];
2290 qinfo->mp = rxq->mp;
2291 qinfo->scattered_rx = dev->data->scattered_rx;
2292 qinfo->nb_desc = rxq->nb_rx_desc;
2294 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2295 qinfo->conf.rx_drop_en = true;
2296 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2300 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2301 struct rte_eth_txq_info *qinfo)
2303 struct iavf_tx_queue *txq;
2305 txq = dev->data->tx_queues[queue_id];
2307 qinfo->nb_desc = txq->nb_tx_desc;
2309 qinfo->conf.tx_free_thresh = txq->free_thresh;
2310 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2311 qinfo->conf.offloads = txq->offloads;
2312 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2315 /* Get the number of used descriptors of an Rx queue */
2317 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2319 #define IAVF_RXQ_SCAN_INTERVAL 4
2320 volatile union iavf_rx_desc *rxdp;
2321 struct iavf_rx_queue *rxq;
2324 rxq = dev->data->rx_queues[queue_id];
2325 rxdp = &rxq->rx_ring[rxq->rx_tail];
2327 while ((desc < rxq->nb_rx_desc) &&
2328 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2329 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2330 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2331 		/* Check the DD bit of every fourth Rx descriptor to avoid
2332 		 * checking too frequently and degrading performance
2335 desc += IAVF_RXQ_SCAN_INTERVAL;
2336 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2337 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2338 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2339 desc - rxq->nb_rx_desc]);
2346 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2348 struct iavf_rx_queue *rxq = rx_queue;
2349 volatile uint64_t *status;
2353 if (unlikely(offset >= rxq->nb_rx_desc))
2356 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2357 return RTE_ETH_RX_DESC_UNAVAIL;
2359 desc = rxq->rx_tail + offset;
2360 if (desc >= rxq->nb_rx_desc)
2361 desc -= rxq->nb_rx_desc;
2363 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2364 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2365 << IAVF_RXD_QW1_STATUS_SHIFT);
	if (*status & mask)
2367 return RTE_ETH_RX_DESC_DONE;
2369 return RTE_ETH_RX_DESC_AVAIL;
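/* Usage sketch (illustrative only): applications query this status through
 * rte_eth_rx_descriptor_status(), e.g. to probe how far the queue has
 * filled up:
 *
 *     if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
 *         RTE_ETH_RX_DESC_DONE)
 *             ; // at least offset + 1 received packets are pending
 */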
2373 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2375 struct iavf_tx_queue *txq = tx_queue;
2376 volatile uint64_t *status;
2377 uint64_t mask, expect;
	uint32_t desc;
2380 if (unlikely(offset >= txq->nb_tx_desc))
	return -EINVAL;
2383 desc = txq->tx_tail + offset;
2384 /* go to the next descriptor that has the RS bit set */
2385 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
	txq->rs_thresh;
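/* tx_tail + offset plus the round-up above may exceed the ring size by up
 * to nearly twice nb_tx_desc, hence the possible double wrap-around below.
 */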
2387 if (desc >= txq->nb_tx_desc) {
2388 desc -= txq->nb_tx_desc;
2389 if (desc >= txq->nb_tx_desc)
2390 desc -= txq->nb_tx_desc;
2393 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2394 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2395 expect = rte_cpu_to_le_64(
2396 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2397 if ((*status & mask) == expect)
2398 return RTE_ETH_TX_DESC_DONE;
2400 return RTE_ETH_TX_DESC_FULL;
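/* Default translation table from the packet-type index reported in the Rx
 * descriptor to the RTE_PTYPE_* flags stored in mbuf->packet_type. Indexes
 * that are not listed are reserved and resolve to RTE_PTYPE_UNKNOWN (0).
 */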
2404 iavf_get_default_ptype_table(void)
2406 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2407 __rte_cache_aligned = {
2410 [1] = RTE_PTYPE_L2_ETHER,
2411 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2412 /* [3] - [5] reserved */
2413 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2414 /* [7] - [10] reserved */
2415 [11] = RTE_PTYPE_L2_ETHER_ARP,
2416 /* [12] - [21] reserved */
2418 /* Non tunneled IPv4 */
2419 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_FRAG,
2421 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2422 RTE_PTYPE_L4_NONFRAG,
2423 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_UDP,
2426 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_TCP,
2428 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_SCTP,
2430 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_ICMP,
2434 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2435 RTE_PTYPE_TUNNEL_IP |
2436 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2437 RTE_PTYPE_INNER_L4_FRAG,
2438 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2439 RTE_PTYPE_TUNNEL_IP |
2440 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2441 RTE_PTYPE_INNER_L4_NONFRAG,
2442 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2443 RTE_PTYPE_TUNNEL_IP |
2444 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2445 RTE_PTYPE_INNER_L4_UDP,
2447 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2448 RTE_PTYPE_TUNNEL_IP |
2449 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2450 RTE_PTYPE_INNER_L4_TCP,
2451 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2452 RTE_PTYPE_TUNNEL_IP |
2453 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2454 RTE_PTYPE_INNER_L4_SCTP,
2455 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2456 RTE_PTYPE_TUNNEL_IP |
2457 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2458 RTE_PTYPE_INNER_L4_ICMP,
2461 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2462 RTE_PTYPE_TUNNEL_IP |
2463 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2464 RTE_PTYPE_INNER_L4_FRAG,
2465 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2466 RTE_PTYPE_TUNNEL_IP |
2467 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2468 RTE_PTYPE_INNER_L4_NONFRAG,
2469 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2470 RTE_PTYPE_TUNNEL_IP |
2471 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2472 RTE_PTYPE_INNER_L4_UDP,
2474 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2475 RTE_PTYPE_TUNNEL_IP |
2476 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2477 RTE_PTYPE_INNER_L4_TCP,
2478 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2479 RTE_PTYPE_TUNNEL_IP |
2480 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2481 RTE_PTYPE_INNER_L4_SCTP,
2482 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2483 RTE_PTYPE_TUNNEL_IP |
2484 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2485 RTE_PTYPE_INNER_L4_ICMP,
2487 /* IPv4 --> GRE/Teredo/VXLAN */
2488 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2489 RTE_PTYPE_TUNNEL_GRENAT,
2491 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2492 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2493 RTE_PTYPE_TUNNEL_GRENAT |
2494 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2495 RTE_PTYPE_INNER_L4_FRAG,
2496 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2497 RTE_PTYPE_TUNNEL_GRENAT |
2498 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2499 RTE_PTYPE_INNER_L4_NONFRAG,
2500 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2501 RTE_PTYPE_TUNNEL_GRENAT |
2502 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2503 RTE_PTYPE_INNER_L4_UDP,
2505 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2506 RTE_PTYPE_TUNNEL_GRENAT |
2507 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2508 RTE_PTYPE_INNER_L4_TCP,
2509 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2510 RTE_PTYPE_TUNNEL_GRENAT |
2511 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2512 RTE_PTYPE_INNER_L4_SCTP,
2513 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2514 RTE_PTYPE_TUNNEL_GRENAT |
2515 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2516 RTE_PTYPE_INNER_L4_ICMP,
2518 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2519 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2520 RTE_PTYPE_TUNNEL_GRENAT |
2521 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2522 RTE_PTYPE_INNER_L4_FRAG,
2523 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2524 RTE_PTYPE_TUNNEL_GRENAT |
2525 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2526 RTE_PTYPE_INNER_L4_NONFRAG,
2527 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2528 RTE_PTYPE_TUNNEL_GRENAT |
2529 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2530 RTE_PTYPE_INNER_L4_UDP,
2532 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2533 RTE_PTYPE_TUNNEL_GRENAT |
2534 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2535 RTE_PTYPE_INNER_L4_TCP,
2536 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2537 RTE_PTYPE_TUNNEL_GRENAT |
2538 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2539 RTE_PTYPE_INNER_L4_SCTP,
2540 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2541 RTE_PTYPE_TUNNEL_GRENAT |
2542 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2543 RTE_PTYPE_INNER_L4_ICMP,
2545 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2546 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2547 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2549 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2550 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2551 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2552 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2553 RTE_PTYPE_INNER_L4_FRAG,
2554 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2555 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2556 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2557 RTE_PTYPE_INNER_L4_NONFRAG,
2558 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2559 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2560 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2561 RTE_PTYPE_INNER_L4_UDP,
2563 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2564 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2565 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2566 RTE_PTYPE_INNER_L4_TCP,
2567 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2568 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2569 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2570 RTE_PTYPE_INNER_L4_SCTP,
2571 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2572 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2573 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2574 RTE_PTYPE_INNER_L4_ICMP,
2576 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2577 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2578 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2579 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2580 RTE_PTYPE_INNER_L4_FRAG,
2581 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2582 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2583 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2584 RTE_PTYPE_INNER_L4_NONFRAG,
2585 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2586 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2587 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2588 RTE_PTYPE_INNER_L4_UDP,
2590 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2591 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2592 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2593 RTE_PTYPE_INNER_L4_TCP,
2594 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2595 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2596 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2597 RTE_PTYPE_INNER_L4_SCTP,
2598 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2599 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2600 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2601 RTE_PTYPE_INNER_L4_ICMP,
2602 /* [73] - [87] reserved */
2604 /* Non tunneled IPv6 */
2605 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_FRAG,
2607 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2608 RTE_PTYPE_L4_NONFRAG,
2609 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_UDP,
2612 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_TCP,
2614 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_SCTP,
2616 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_ICMP,
2620 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2621 RTE_PTYPE_TUNNEL_IP |
2622 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2623 RTE_PTYPE_INNER_L4_FRAG,
2624 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2625 RTE_PTYPE_TUNNEL_IP |
2626 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2627 RTE_PTYPE_INNER_L4_NONFRAG,
2628 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2629 RTE_PTYPE_TUNNEL_IP |
2630 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2631 RTE_PTYPE_INNER_L4_UDP,
2633 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2634 RTE_PTYPE_TUNNEL_IP |
2635 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2636 RTE_PTYPE_INNER_L4_TCP,
2637 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2638 RTE_PTYPE_TUNNEL_IP |
2639 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2640 RTE_PTYPE_INNER_L4_SCTP,
2641 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2642 RTE_PTYPE_TUNNEL_IP |
2643 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2644 RTE_PTYPE_INNER_L4_ICMP,
2647 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2648 RTE_PTYPE_TUNNEL_IP |
2649 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2650 RTE_PTYPE_INNER_L4_FRAG,
2651 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2652 RTE_PTYPE_TUNNEL_IP |
2653 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2654 RTE_PTYPE_INNER_L4_NONFRAG,
2655 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2656 RTE_PTYPE_TUNNEL_IP |
2657 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2658 RTE_PTYPE_INNER_L4_UDP,
2659 /* [105] reserved */
2660 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2661 RTE_PTYPE_TUNNEL_IP |
2662 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2663 RTE_PTYPE_INNER_L4_TCP,
2664 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2665 RTE_PTYPE_TUNNEL_IP |
2666 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2667 RTE_PTYPE_INNER_L4_SCTP,
2668 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2669 RTE_PTYPE_TUNNEL_IP |
2670 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2671 RTE_PTYPE_INNER_L4_ICMP,
2673 /* IPv6 --> GRE/Teredo/VXLAN */
2674 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2675 RTE_PTYPE_TUNNEL_GRENAT,
2677 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2678 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2679 RTE_PTYPE_TUNNEL_GRENAT |
2680 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2681 RTE_PTYPE_INNER_L4_FRAG,
2682 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2683 RTE_PTYPE_TUNNEL_GRENAT |
2684 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2685 RTE_PTYPE_INNER_L4_NONFRAG,
2686 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2687 RTE_PTYPE_TUNNEL_GRENAT |
2688 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2689 RTE_PTYPE_INNER_L4_UDP,
2690 /* [113] reserved */
2691 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2692 RTE_PTYPE_TUNNEL_GRENAT |
2693 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2694 RTE_PTYPE_INNER_L4_TCP,
2695 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2696 RTE_PTYPE_TUNNEL_GRENAT |
2697 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2698 RTE_PTYPE_INNER_L4_SCTP,
2699 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2700 RTE_PTYPE_TUNNEL_GRENAT |
2701 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2702 RTE_PTYPE_INNER_L4_ICMP,
2704 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2705 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2706 RTE_PTYPE_TUNNEL_GRENAT |
2707 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2708 RTE_PTYPE_INNER_L4_FRAG,
2709 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2710 RTE_PTYPE_TUNNEL_GRENAT |
2711 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2712 RTE_PTYPE_INNER_L4_NONFRAG,
2713 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2714 RTE_PTYPE_TUNNEL_GRENAT |
2715 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2716 RTE_PTYPE_INNER_L4_UDP,
2717 /* [120] reserved */
2718 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2719 RTE_PTYPE_TUNNEL_GRENAT |
2720 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2721 RTE_PTYPE_INNER_L4_TCP,
2722 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2723 RTE_PTYPE_TUNNEL_GRENAT |
2724 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2725 RTE_PTYPE_INNER_L4_SCTP,
2726 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2727 RTE_PTYPE_TUNNEL_GRENAT |
2728 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2729 RTE_PTYPE_INNER_L4_ICMP,
2731 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2732 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2733 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2735 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2736 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2737 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2738 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2739 RTE_PTYPE_INNER_L4_FRAG,
2740 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2741 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2742 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2743 RTE_PTYPE_INNER_L4_NONFRAG,
2744 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2745 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2746 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2747 RTE_PTYPE_INNER_L4_UDP,
2748 /* [128] reserved */
2749 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2750 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2751 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2752 RTE_PTYPE_INNER_L4_TCP,
2753 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2754 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2755 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2756 RTE_PTYPE_INNER_L4_SCTP,
2757 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2758 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2759 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2760 RTE_PTYPE_INNER_L4_ICMP,
2762 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2763 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2764 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2765 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2766 RTE_PTYPE_INNER_L4_FRAG,
2767 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2768 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2769 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2770 RTE_PTYPE_INNER_L4_NONFRAG,
2771 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2772 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2773 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2774 RTE_PTYPE_INNER_L4_UDP,
2775 /* [135] reserved */
2776 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2777 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2778 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2779 RTE_PTYPE_INNER_L4_TCP,
2780 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2781 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2782 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2783 RTE_PTYPE_INNER_L4_SCTP,
2784 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2785 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2786 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2787 RTE_PTYPE_INNER_L4_ICMP,
2788 /* [139] - [299] reserved */
2791 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
2792 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
2794 /* PPPoE --> IPv4 */
2795 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
2796 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_FRAG,
2798 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
2799 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2800 RTE_PTYPE_L4_NONFRAG,
2801 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
2802 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_UDP,
2804 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
2805 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_TCP,
2807 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
2808 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_SCTP,
2810 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
2811 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_ICMP,
2814 /* PPPoE --> IPv6 */
2815 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
2816 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_FRAG,
2818 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
2819 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2820 RTE_PTYPE_L4_NONFRAG,
2821 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
2822 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_UDP,
2824 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
2825 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_TCP,
2827 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
2828 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_SCTP,
2830 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
2831 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_ICMP,
2833 /* [314] - [324] reserved */
2835 /* IPv4/IPv6 --> GTPC/GTPU */
2836 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2837 RTE_PTYPE_TUNNEL_GTPC,
2838 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2839 RTE_PTYPE_TUNNEL_GTPC,
2840 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2841 RTE_PTYPE_TUNNEL_GTPC,
2842 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2843 RTE_PTYPE_TUNNEL_GTPC,
2844 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2845 RTE_PTYPE_TUNNEL_GTPU,
2846 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2847 RTE_PTYPE_TUNNEL_GTPU,
2849 /* IPv4 --> GTPU --> IPv4 */
2850 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2851 RTE_PTYPE_TUNNEL_GTPU |
2852 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2853 RTE_PTYPE_INNER_L4_FRAG,
2854 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2855 RTE_PTYPE_TUNNEL_GTPU |
2856 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2857 RTE_PTYPE_INNER_L4_NONFRAG,
2858 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2859 RTE_PTYPE_TUNNEL_GTPU |
2860 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2861 RTE_PTYPE_INNER_L4_UDP,
2862 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2863 RTE_PTYPE_TUNNEL_GTPU |
2864 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2865 RTE_PTYPE_INNER_L4_TCP,
2866 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2867 RTE_PTYPE_TUNNEL_GTPU |
2868 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2869 RTE_PTYPE_INNER_L4_ICMP,
2871 /* IPv6 --> GTPU --> IPv4 */
2872 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2873 RTE_PTYPE_TUNNEL_GTPU |
2874 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2875 RTE_PTYPE_INNER_L4_FRAG,
2876 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2877 RTE_PTYPE_TUNNEL_GTPU |
2878 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2879 RTE_PTYPE_INNER_L4_NONFRAG,
2880 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2881 RTE_PTYPE_TUNNEL_GTPU |
2882 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2883 RTE_PTYPE_INNER_L4_UDP,
2884 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2885 RTE_PTYPE_TUNNEL_GTPU |
2886 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2887 RTE_PTYPE_INNER_L4_TCP,
2888 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2889 RTE_PTYPE_TUNNEL_GTPU |
2890 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2891 RTE_PTYPE_INNER_L4_ICMP,
2893 /* IPv4 --> GTPU --> IPv6 */
2894 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2895 RTE_PTYPE_TUNNEL_GTPU |
2896 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2897 RTE_PTYPE_INNER_L4_FRAG,
2898 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2899 RTE_PTYPE_TUNNEL_GTPU |
2900 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2901 RTE_PTYPE_INNER_L4_NONFRAG,
2902 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2903 RTE_PTYPE_TUNNEL_GTPU |
2904 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2905 RTE_PTYPE_INNER_L4_UDP,
2906 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2907 RTE_PTYPE_TUNNEL_GTPU |
2908 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2909 RTE_PTYPE_INNER_L4_TCP,
2910 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2911 RTE_PTYPE_TUNNEL_GTPU |
2912 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2913 RTE_PTYPE_INNER_L4_ICMP,
2915 /* IPv6 --> GTPU --> IPv6 */
2916 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2917 RTE_PTYPE_TUNNEL_GTPU |
2918 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2919 RTE_PTYPE_INNER_L4_FRAG,
2920 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2921 RTE_PTYPE_TUNNEL_GTPU |
2922 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2923 RTE_PTYPE_INNER_L4_NONFRAG,
2924 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2925 RTE_PTYPE_TUNNEL_GTPU |
2926 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2927 RTE_PTYPE_INNER_L4_UDP,
2928 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2929 RTE_PTYPE_TUNNEL_GTPU |
2930 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2931 RTE_PTYPE_INNER_L4_TCP,
2932 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2933 RTE_PTYPE_TUNNEL_GTPU |
2934 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2935 RTE_PTYPE_INNER_L4_ICMP,
2936 /* All others reserved */
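/* Illustrative lookup sketch (not part of this file): the scalar Rx path
 * translates the descriptor's packet-type index through this table roughly
 * as follows; the exact field names (e.g. adapter->ptype_tbl) are
 * assumptions based on the rest of the driver:
 *
 *     const uint32_t *tbl = rxq->vsi->adapter->ptype_tbl;
 *     uint8_t ptype = (qword1 & IAVF_RXD_QW1_PTYPE_MASK) >>
 *                     IAVF_RXD_QW1_PTYPE_SHIFT;
 *     mb->packet_type = tbl[ptype];
 */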