1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
28 #include "iavf_rxtx.h"
31 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
33 /* The following constraints must be satisfied:
34 * thresh < rxq->nb_rx_desc
36 if (thresh >= nb_desc) {
37 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
45 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
46 uint16_t tx_free_thresh)
48 /* TX descriptors will have their RS bit set after tx_rs_thresh
49 * descriptors have been used. The TX descriptor ring will be cleaned
50 * after tx_free_thresh descriptors are used or if the number of
51 * descriptors required to transmit a packet is greater than the
52 * number of free TX descriptors.
54 * The following constraints must be satisfied:
55 * - tx_rs_thresh must be less than the size of the ring minus 2.
56 * - tx_free_thresh must be less than the size of the ring minus 3.
57 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
58 * - tx_rs_thresh must be a divisor of the ring size.
60 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
61 * race condition, hence the maximum threshold constraints. When set
62 * to zero use default values.
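 *
 * Worked example (illustrative, assuming typical defaults of 32 for both
 * thresholds): with nb_desc = 512, tx_rs_thresh = 32 and tx_free_thresh = 32,
 * all four checks below pass, since 32 < 510, 32 < 509, 32 <= 32 and
 * 512 % 32 == 0.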
64 if (tx_rs_thresh >= (nb_desc - 2)) {
65 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
66 "number of TX descriptors (%u) minus 2",
67 tx_rs_thresh, nb_desc);
70 if (tx_free_thresh >= (nb_desc - 3)) {
71 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
72 "number of TX descriptors (%u) minus 3.",
73 tx_free_thresh, nb_desc);
76 if (tx_rs_thresh > tx_free_thresh) {
77 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
78 "equal to tx_free_thresh (%u).",
79 tx_rs_thresh, tx_free_thresh);
82 if ((nb_desc % tx_rs_thresh) != 0) {
83 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
84 "number of TX descriptors (%u).",
85 tx_rs_thresh, nb_desc);
93 check_rx_vec_allow(struct iavf_rx_queue *rxq)
95 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
96 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
97 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
101 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
106 check_tx_vec_allow(struct iavf_tx_queue *txq)
108 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
109 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
110 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
111 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
114 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
119 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
123 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
124 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
125 "rxq->rx_free_thresh=%d, "
126 "IAVF_RX_MAX_BURST=%d",
127 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
129 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
130 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
131 "rxq->nb_rx_desc=%d, "
132 "rxq->rx_free_thresh=%d",
133 rxq->nb_rx_desc, rxq->rx_free_thresh);
140 reset_rx_queue(struct iavf_rx_queue *rxq)
148 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
150 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
151 ((volatile char *)rxq->rx_ring)[i] = 0;
153 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
155 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
156 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
159 rxq->rx_nb_avail = 0;
160 rxq->rx_next_avail = 0;
161 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
165 rxq->pkt_first_seg = NULL;
166 rxq->pkt_last_seg = NULL;
170 reset_tx_queue(struct iavf_tx_queue *txq)
172 struct iavf_tx_entry *txe;
177 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
182 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
183 for (i = 0; i < size; i++)
184 ((volatile char *)txq->tx_ring)[i] = 0;
186 prev = (uint16_t)(txq->nb_tx_desc - 1);
187 for (i = 0; i < txq->nb_tx_desc; i++) {
188 txq->tx_ring[i].cmd_type_offset_bsz =
189 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
192 txe[prev].next_id = i;
199 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
200 txq->nb_free = txq->nb_tx_desc - 1;
202 txq->next_dd = txq->rs_thresh - 1;
203 txq->next_rs = txq->rs_thresh - 1;
207 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
209 volatile union iavf_rx_desc *rxd;
210 struct rte_mbuf *mbuf = NULL;
214 for (i = 0; i < rxq->nb_rx_desc; i++) {
215 mbuf = rte_mbuf_raw_alloc(rxq->mp);
216 if (unlikely(!mbuf)) {
217 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
221 rte_mbuf_refcnt_set(mbuf, 1);
223 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
225 mbuf->port = rxq->port_id;
228 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
230 rxd = &rxq->rx_ring[i];
231 rxd->read.pkt_addr = dma_addr;
232 rxd->read.hdr_addr = 0;
233 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
238 rxq->sw_ring[i] = mbuf;
245 release_rxq_mbufs(struct iavf_rx_queue *rxq)
252 for (i = 0; i < rxq->nb_rx_desc; i++) {
253 if (rxq->sw_ring[i]) {
254 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
255 rxq->sw_ring[i] = NULL;
260 if (rxq->rx_nb_avail == 0)
262 for (i = 0; i < rxq->rx_nb_avail; i++) {
263 struct rte_mbuf *mbuf;
265 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
266 rte_pktmbuf_free_seg(mbuf);
268 rxq->rx_nb_avail = 0;
272 release_txq_mbufs(struct iavf_tx_queue *txq)
276 if (!txq || !txq->sw_ring) {
277 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
281 for (i = 0; i < txq->nb_tx_desc; i++) {
282 if (txq->sw_ring[i].mbuf) {
283 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
284 txq->sw_ring[i].mbuf = NULL;
289 static const struct iavf_rxq_ops def_rxq_ops = {
290 .release_mbufs = release_rxq_mbufs,
293 static const struct iavf_txq_ops def_txq_ops = {
294 .release_mbufs = release_txq_mbufs,
298 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
299 uint16_t nb_desc, unsigned int socket_id,
300 const struct rte_eth_rxconf *rx_conf,
301 struct rte_mempool *mp)
303 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
304 struct iavf_adapter *ad =
305 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
306 struct iavf_info *vf =
307 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
308 struct iavf_vsi *vsi = &vf->vsi;
309 struct iavf_rx_queue *rxq;
310 const struct rte_memzone *mz;
313 uint16_t rx_free_thresh;
315 PMD_INIT_FUNC_TRACE();
317 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
318 nb_desc > IAVF_MAX_RING_DESC ||
319 nb_desc < IAVF_MIN_RING_DESC) {
320 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
325 /* Check free threshold */
326 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
327 IAVF_DEFAULT_RX_FREE_THRESH :
328 rx_conf->rx_free_thresh;
329 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
332 /* Free memory if needed */
333 if (dev->data->rx_queues[queue_idx]) {
334 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
335 dev->data->rx_queues[queue_idx] = NULL;
338 /* Allocate the rx queue data structure */
339 rxq = rte_zmalloc_socket("iavf rxq",
340 sizeof(struct iavf_rx_queue),
344 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
345 "rx queue data structure");
350 rxq->nb_rx_desc = nb_desc;
351 rxq->rx_free_thresh = rx_free_thresh;
352 rxq->queue_id = queue_idx;
353 rxq->port_id = dev->data->port_id;
354 rxq->crc_len = 0; /* crc stripping by default */
355 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
359 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
360 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
362 /* Allocate the software ring. */
363 len = nb_desc + IAVF_RX_MAX_BURST;
365 rte_zmalloc_socket("iavf rx sw ring",
366 sizeof(struct rte_mbuf *) * len,
370 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
375 /* Allocate the maximum number of RX ring hardware descriptors, with
376 * a little extra to support bulk allocation.
378 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
379 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
381 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
382 ring_size, IAVF_RING_BASE_ALIGN,
385 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
386 rte_free(rxq->sw_ring);
390 /* Zero all the descriptors in the ring. */
391 memset(mz->addr, 0, ring_size);
392 rxq->rx_ring_phys_addr = mz->iova;
393 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
398 dev->data->rx_queues[queue_idx] = rxq;
399 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
400 rxq->ops = &def_rxq_ops;
402 if (check_rx_bulk_allow(rxq) == true) {
403 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
404 "satisfied. Rx Burst Bulk Alloc function will be "
405 "used on port=%d, queue=%d.",
406 rxq->port_id, rxq->queue_id);
408 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
409 "not satisfied, Scattered Rx is requested "
410 "on port=%d, queue=%d.",
411 rxq->port_id, rxq->queue_id);
412 ad->rx_bulk_alloc_allowed = false;
415 if (check_rx_vec_allow(rxq) == false)
416 ad->rx_vec_allowed = false;
422 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
425 unsigned int socket_id,
426 const struct rte_eth_txconf *tx_conf)
428 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
429 struct iavf_tx_queue *txq;
430 const struct rte_memzone *mz;
432 uint16_t tx_rs_thresh, tx_free_thresh;
435 PMD_INIT_FUNC_TRACE();
437 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
439 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
440 nb_desc > IAVF_MAX_RING_DESC ||
441 nb_desc < IAVF_MIN_RING_DESC) {
442 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
447 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
448 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
449 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
450 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
451 if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
	return -EINVAL;
453 /* Free memory if needed. */
454 if (dev->data->tx_queues[queue_idx]) {
455 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
456 dev->data->tx_queues[queue_idx] = NULL;
459 /* Allocate the TX queue data structure. */
460 txq = rte_zmalloc_socket("iavf txq",
461 sizeof(struct iavf_tx_queue),
465 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
466 "tx queue structure");
470 txq->nb_tx_desc = nb_desc;
471 txq->rs_thresh = tx_rs_thresh;
472 txq->free_thresh = tx_free_thresh;
473 txq->queue_id = queue_idx;
474 txq->port_id = dev->data->port_id;
475 txq->offloads = offloads;
476 txq->tx_deferred_start = tx_conf->tx_deferred_start;
478 /* Allocate software ring */
480 rte_zmalloc_socket("iavf tx sw ring",
481 sizeof(struct iavf_tx_entry) * nb_desc,
485 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
490 /* Allocate TX hardware ring descriptors. */
491 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
492 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
493 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
494 ring_size, IAVF_RING_BASE_ALIGN,
497 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
498 rte_free(txq->sw_ring);
502 txq->tx_ring_phys_addr = mz->iova;
503 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
508 dev->data->tx_queues[queue_idx] = txq;
509 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
510 txq->ops = &def_txq_ops;
512 if (check_tx_vec_allow(txq) == false) {
513 struct iavf_adapter *ad =
514 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
515 ad->tx_vec_allowed = false;
522 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
524 struct iavf_adapter *adapter =
525 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
526 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
527 struct iavf_rx_queue *rxq;
530 PMD_DRV_FUNC_TRACE();
532 if (rx_queue_id >= dev->data->nb_rx_queues)
535 rxq = dev->data->rx_queues[rx_queue_id];
537 err = alloc_rxq_mbufs(rxq);
539 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
545 /* Init the RX tail register. */
546 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
547 IAVF_WRITE_FLUSH(hw);
549 /* Ready to switch the queue on */
550 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
552 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
555 dev->data->rx_queue_state[rx_queue_id] =
556 RTE_ETH_QUEUE_STATE_STARTED;
562 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
564 struct iavf_adapter *adapter =
565 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
566 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
567 struct iavf_tx_queue *txq;
570 PMD_DRV_FUNC_TRACE();
572 if (tx_queue_id >= dev->data->nb_tx_queues)
575 txq = dev->data->tx_queues[tx_queue_id];
577 /* Init the TX tail register. */
578 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
579 IAVF_WRITE_FLUSH(hw);
581 /* Ready to switch the queue on */
582 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
585 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
588 dev->data->tx_queue_state[tx_queue_id] =
589 RTE_ETH_QUEUE_STATE_STARTED;
595 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
597 struct iavf_adapter *adapter =
598 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
599 struct iavf_rx_queue *rxq;
602 PMD_DRV_FUNC_TRACE();
604 if (rx_queue_id >= dev->data->nb_rx_queues)
607 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
609 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
614 rxq = dev->data->rx_queues[rx_queue_id];
615 rxq->ops->release_mbufs(rxq);
617 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
623 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
625 struct iavf_adapter *adapter =
626 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
627 struct iavf_tx_queue *txq;
630 PMD_DRV_FUNC_TRACE();
632 if (tx_queue_id >= dev->data->nb_tx_queues)
635 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
637 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
642 txq = dev->data->tx_queues[tx_queue_id];
643 txq->ops->release_mbufs(txq);
645 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
651 iavf_dev_rx_queue_release(void *rxq)
653 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
658 q->ops->release_mbufs(q);
659 rte_free(q->sw_ring);
660 rte_memzone_free(q->mz);
665 iavf_dev_tx_queue_release(void *txq)
667 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
672 q->ops->release_mbufs(q);
673 rte_free(q->sw_ring);
674 rte_memzone_free(q->mz);
679 iavf_stop_queues(struct rte_eth_dev *dev)
681 struct iavf_adapter *adapter =
682 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
683 struct iavf_rx_queue *rxq;
684 struct iavf_tx_queue *txq;
687 /* Stop All queues */
688 ret = iavf_disable_queues(adapter);
690 PMD_DRV_LOG(WARNING, "Failed to stop queues");
692 for (i = 0; i < dev->data->nb_tx_queues; i++) {
693 txq = dev->data->tx_queues[i];
696 txq->ops->release_mbufs(txq);
698 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
700 for (i = 0; i < dev->data->nb_rx_queues; i++) {
701 rxq = dev->data->rx_queues[i];
704 rxq->ops->release_mbufs(rxq);
706 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
711 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
713 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
714 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
715 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
717 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
723 /* Translate the rx descriptor status and error fields to pkt flags */
724 static inline uint64_t
725 iavf_rxd_to_pkt_flags(uint64_t qword)
728 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
730 #define IAVF_RX_ERR_BITS 0x3f
732 /* Check if RSS_HASH */
733 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
734 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
735 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
737 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
738 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
742 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
743 flags |= PKT_RX_IP_CKSUM_BAD;
745 flags |= PKT_RX_IP_CKSUM_GOOD;
747 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
748 flags |= PKT_RX_L4_CKSUM_BAD;
750 flags |= PKT_RX_L4_CKSUM_GOOD;
752 /* TODO: Oversize error bit is not processed here */
757 /* implement recv_pkts */
759 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
761 volatile union iavf_rx_desc *rx_ring;
762 volatile union iavf_rx_desc *rxdp;
763 struct iavf_rx_queue *rxq;
764 union iavf_rx_desc rxd;
765 struct rte_mbuf *rxe;
766 struct rte_eth_dev *dev;
767 struct rte_mbuf *rxm;
768 struct rte_mbuf *nmb;
772 uint16_t rx_packet_len;
773 uint16_t rx_id, nb_hold;
776 const uint32_t *ptype_tbl;
781 rx_id = rxq->rx_tail;
782 rx_ring = rxq->rx_ring;
783 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
785 while (nb_rx < nb_pkts) {
786 rxdp = &rx_ring[rx_id];
787 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
788 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
789 IAVF_RXD_QW1_STATUS_SHIFT;
791 /* Check the DD bit first */
792 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
794 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
796 nmb = rte_mbuf_raw_alloc(rxq->mp);
797 if (unlikely(!nmb)) {
798 dev = &rte_eth_devices[rxq->port_id];
799 dev->data->rx_mbuf_alloc_failed++;
800 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
801 "queue_id=%u", rxq->port_id, rxq->queue_id);
807 rxe = rxq->sw_ring[rx_id];
809 if (unlikely(rx_id == rxq->nb_rx_desc))
812 /* Prefetch next mbuf */
813 rte_prefetch0(rxq->sw_ring[rx_id]);
815 /* When the next RX descriptor is on a cache line boundary,
816 * prefetch the next 4 RX descriptors and next 8 pointers
819 if ((rx_id & 0x3) == 0) {
820 rte_prefetch0(&rx_ring[rx_id]);
821 rte_prefetch0(rxq->sw_ring[rx_id]);
826 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
827 rxdp->read.hdr_addr = 0;
828 rxdp->read.pkt_addr = dma_addr;
830 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
831 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
833 rxm->data_off = RTE_PKTMBUF_HEADROOM;
834 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
837 rxm->pkt_len = rx_packet_len;
838 rxm->data_len = rx_packet_len;
839 rxm->port = rxq->port_id;
841 iavf_rxd_to_vlan_tci(rxm, &rxd);
842 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
844 ptype_tbl[(uint8_t)((qword1 &
845 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
847 if (pkt_flags & PKT_RX_RSS_HASH)
849 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
851 rxm->ol_flags |= pkt_flags;
853 rx_pkts[nb_rx++] = rxm;
855 rxq->rx_tail = rx_id;
857 /* If the number of free RX descriptors is greater than the RX free
858 * threshold of the queue, advance the receive tail register of queue.
859 * Update that register with the value of the last processed RX
860 * descriptor minus 1.
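 *
 * For example, assuming rx_free_thresh = 32: the tail register is written
 * only after more than 32 descriptors have been refilled, batching the
 * doorbell MMIO writes instead of issuing one per received packet.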
862 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
863 if (nb_hold > rxq->rx_free_thresh) {
864 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
865 "nb_hold=%u nb_rx=%u",
866 rxq->port_id, rxq->queue_id,
867 rx_id, nb_hold, nb_rx);
868 rx_id = (uint16_t)((rx_id == 0) ?
869 (rxq->nb_rx_desc - 1) : (rx_id - 1));
870 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
873 rxq->nb_rx_hold = nb_hold;
878 /* implement recv_scattered_pkts */
880 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
883 struct iavf_rx_queue *rxq = rx_queue;
884 union iavf_rx_desc rxd;
885 struct rte_mbuf *rxe;
886 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
887 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
888 struct rte_mbuf *nmb, *rxm;
889 uint16_t rx_id = rxq->rx_tail;
890 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
891 struct rte_eth_dev *dev;
897 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
898 volatile union iavf_rx_desc *rxdp;
899 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
901 while (nb_rx < nb_pkts) {
902 rxdp = &rx_ring[rx_id];
903 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
904 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
905 IAVF_RXD_QW1_STATUS_SHIFT;
907 /* Check the DD bit */
908 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
910 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
912 nmb = rte_mbuf_raw_alloc(rxq->mp);
913 if (unlikely(!nmb)) {
914 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
915 "queue_id=%u", rxq->port_id, rxq->queue_id);
916 dev = &rte_eth_devices[rxq->port_id];
917 dev->data->rx_mbuf_alloc_failed++;
923 rxe = rxq->sw_ring[rx_id];
925 if (rx_id == rxq->nb_rx_desc)
928 /* Prefetch next mbuf */
929 rte_prefetch0(rxq->sw_ring[rx_id]);
931 /* When the next RX descriptor is on a cache line boundary,
932 * prefetch the next 4 RX descriptors and next 8 pointers
935 if ((rx_id & 0x3) == 0) {
936 rte_prefetch0(&rx_ring[rx_id]);
937 rte_prefetch0(rxq->sw_ring[rx_id]);
943 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
945 /* Set data buffer address and data length of the mbuf */
946 rxdp->read.hdr_addr = 0;
947 rxdp->read.pkt_addr = dma_addr;
948 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
949 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
950 rxm->data_len = rx_packet_len;
951 rxm->data_off = RTE_PKTMBUF_HEADROOM;
953 /* If this is the first buffer of the received packet, set the
954 * pointer to the first mbuf of the packet and initialize its
955 * context. Otherwise, update the total length and the number
956 * of segments of the current scattered packet, and update the
957 * pointer to the last mbuf of the current packet.
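 *
 * For instance, a packet spread over three receive buffers arrives as
 * three descriptors: the first becomes first_seg, and the next two only
 * grow pkt_len, increment nb_segs and are chained through last_seg->next.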
961 first_seg->nb_segs = 1;
962 first_seg->pkt_len = rx_packet_len;
965 (uint16_t)(first_seg->pkt_len +
967 first_seg->nb_segs++;
968 last_seg->next = rxm;
971 /* If this is not the last buffer of the received packet,
972 * update the pointer to the last mbuf of the current scattered
973 * packet and continue to parse the RX ring.
975 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
980 /* This is the last buffer of the received packet. If the CRC
981 * is not stripped by the hardware:
982 * - Subtract the CRC length from the total packet length.
983 * - If the last buffer only contains the whole CRC or a part
984 * of it, free the mbuf associated to the last buffer. If part
985 * of the CRC is also contained in the previous mbuf, subtract
986 * the length of that CRC part from the data length of the
990 if (unlikely(rxq->crc_len > 0)) {
991 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
992 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
993 rte_pktmbuf_free_seg(rxm);
994 first_seg->nb_segs--;
996 (uint16_t)(last_seg->data_len -
997 (RTE_ETHER_CRC_LEN - rx_packet_len));
998 last_seg->next = NULL;
1000 rxm->data_len = (uint16_t)(rx_packet_len -
1004 first_seg->port = rxq->port_id;
1005 first_seg->ol_flags = 0;
1006 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1007 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1008 first_seg->packet_type =
1009 ptype_tbl[(uint8_t)((qword1 &
1010 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1012 if (pkt_flags & PKT_RX_RSS_HASH)
1013 first_seg->hash.rss =
1014 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1016 first_seg->ol_flags |= pkt_flags;
1018 /* Prefetch data of first segment, if configured to do so. */
1019 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1020 first_seg->data_off));
1021 rx_pkts[nb_rx++] = first_seg;
1025 /* Record index of the next RX descriptor to probe. */
1026 rxq->rx_tail = rx_id;
1027 rxq->pkt_first_seg = first_seg;
1028 rxq->pkt_last_seg = last_seg;
1030 /* If the number of free RX descriptors is greater than the RX free
1031 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1032 * register. Update the RDT with the value of the last processed RX
1033 * descriptor minus 1, to guarantee that the RDT register is never
1034 * equal to the RDH register, which creates a "full" ring situation
1035 * from the hardware point of view.
1037 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1038 if (nb_hold > rxq->rx_free_thresh) {
1039 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1040 "nb_hold=%u nb_rx=%u",
1041 rxq->port_id, rxq->queue_id,
1042 rx_id, nb_hold, nb_rx);
1043 rx_id = (uint16_t)(rx_id == 0 ?
1044 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1045 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1048 rxq->nb_rx_hold = nb_hold;
1053 #define IAVF_LOOK_AHEAD 8
1055 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1057 volatile union iavf_rx_desc *rxdp;
1058 struct rte_mbuf **rxep;
1059 struct rte_mbuf *mb;
1063 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1064 int32_t i, j, nb_rx = 0;
1066 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1068 rxdp = &rxq->rx_ring[rxq->rx_tail];
1069 rxep = &rxq->sw_ring[rxq->rx_tail];
1071 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1072 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1073 IAVF_RXD_QW1_STATUS_SHIFT;
1075 /* Make sure there is at least 1 packet to receive */
1076 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1079 /* Scan LOOK_AHEAD descriptors at a time to determine which
1080 * descriptors reference packets that are ready to be received.
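 *
 * With IAVF_LOOK_AHEAD = 8, the ring is examined in groups of 8 DD bits;
 * scanning stops at the first group where fewer than 8 bits are set, so
 * only the completed descriptors at the head of the ring are consumed.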
1082 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1083 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1084 /* Read desc statuses backwards to avoid race condition */
1085 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1086 qword1 = rte_le_to_cpu_64(
1087 rxdp[j].wb.qword1.status_error_len);
1088 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1089 IAVF_RXD_QW1_STATUS_SHIFT;
1094 /* Compute how many status bits were set */
1095 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1096 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1100 /* Translate descriptor info to mbuf parameters */
1101 for (j = 0; j < nb_dd; j++) {
1102 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1103 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1106 qword1 = rte_le_to_cpu_64
1107 (rxdp[j].wb.qword1.status_error_len);
1108 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1109 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1110 mb->data_len = pkt_len;
1111 mb->pkt_len = pkt_len;
1113 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1114 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1116 ptype_tbl[(uint8_t)((qword1 &
1117 IAVF_RXD_QW1_PTYPE_MASK) >>
1118 IAVF_RXD_QW1_PTYPE_SHIFT)];
1120 if (pkt_flags & PKT_RX_RSS_HASH)
1121 mb->hash.rss = rte_le_to_cpu_32(
1122 rxdp[j].wb.qword0.hi_dword.rss);
1124 mb->ol_flags |= pkt_flags;
1127 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1128 rxq->rx_stage[i + j] = rxep[j];
1130 if (nb_dd != IAVF_LOOK_AHEAD)
1134 /* Clear software ring entries */
1135 for (i = 0; i < nb_rx; i++)
1136 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1141 static inline uint16_t
1142 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1143 struct rte_mbuf **rx_pkts,
1147 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1149 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1151 for (i = 0; i < nb_pkts; i++)
1152 rx_pkts[i] = stage[i];
1154 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1155 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1161 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1163 volatile union iavf_rx_desc *rxdp;
1164 struct rte_mbuf **rxep;
1165 struct rte_mbuf *mb;
1166 uint16_t alloc_idx, i;
1170 /* Allocate buffers in bulk */
1171 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1172 (rxq->rx_free_thresh - 1));
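	/* Illustrative numbers: with rx_free_thresh = 32 and the initial
	 * rx_free_trigger of 31 set by reset_rx_queue, alloc_idx is 0 and the
	 * first refill replenishes 32 software-ring entries in one bulk get.
	 */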
1173 rxep = &rxq->sw_ring[alloc_idx];
1174 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1175 rxq->rx_free_thresh);
1176 if (unlikely(diag != 0)) {
1177 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1181 rxdp = &rxq->rx_ring[alloc_idx];
1182 for (i = 0; i < rxq->rx_free_thresh; i++) {
1183 if (likely(i < (rxq->rx_free_thresh - 1)))
1184 /* Prefetch next mbuf */
1185 rte_prefetch0(rxep[i + 1]);
1188 rte_mbuf_refcnt_set(mb, 1);
1190 mb->data_off = RTE_PKTMBUF_HEADROOM;
1192 mb->port = rxq->port_id;
1193 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1194 rxdp[i].read.hdr_addr = 0;
1195 rxdp[i].read.pkt_addr = dma_addr;
1198 /* Update rx tail register */
1200 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1202 rxq->rx_free_trigger =
1203 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1204 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1205 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1210 static inline uint16_t
1211 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1213 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1219 if (rxq->rx_nb_avail)
1220 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1222 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1223 rxq->rx_next_avail = 0;
1224 rxq->rx_nb_avail = nb_rx;
1225 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1227 if (rxq->rx_tail > rxq->rx_free_trigger) {
1228 if (iavf_rx_alloc_bufs(rxq) != 0) {
1231 /* TODO: count rx_mbuf_alloc_failed here */
1233 rxq->rx_nb_avail = 0;
1234 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1235 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1236 rxq->sw_ring[j] = rxq->rx_stage[i];
1242 if (rxq->rx_tail >= rxq->nb_rx_desc)
1245 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1246 rxq->port_id, rxq->queue_id,
1247 rxq->rx_tail, nb_rx);
1249 if (rxq->rx_nb_avail)
1250 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1256 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1257 struct rte_mbuf **rx_pkts,
1260 uint16_t nb_rx = 0, n, count;
1262 if (unlikely(nb_pkts == 0))
1265 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1266 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1269 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1270 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1271 nb_rx = (uint16_t)(nb_rx + count);
1272 nb_pkts = (uint16_t)(nb_pkts - count);
1281 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1283 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1284 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1285 uint16_t nb_tx_desc = txq->nb_tx_desc;
1286 uint16_t desc_to_clean_to;
1287 uint16_t nb_tx_to_clean;
1289 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1291 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1292 if (desc_to_clean_to >= nb_tx_desc)
1293 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1295 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1296 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1297 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1298 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1299 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1300 "(port=%d queue=%d)", desc_to_clean_to,
1301 txq->port_id, txq->queue_id);
1305 if (last_desc_cleaned > desc_to_clean_to)
1306 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1309 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1312 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1314 txq->last_desc_cleaned = desc_to_clean_to;
1315 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1320 /* Check if the context descriptor is needed for TX offloading */
1321 static inline uint16_t
1322 iavf_calc_context_desc(uint64_t flags)
1324 static uint64_t mask = PKT_TX_TCP_SEG;
1326 return (flags & mask) ? 1 : 0;
1330 iavf_txd_enable_checksum(uint64_t ol_flags,
1332 uint32_t *td_offset,
1333 union iavf_tx_offload tx_offload)
1336 *td_offset |= (tx_offload.l2_len >> 1) <<
1337 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1339 /* Enable L3 checksum offloads */
1340 if (ol_flags & PKT_TX_IP_CKSUM) {
1341 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1342 *td_offset |= (tx_offload.l3_len >> 2) <<
1343 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1344 } else if (ol_flags & PKT_TX_IPV4) {
1345 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1346 *td_offset |= (tx_offload.l3_len >> 2) <<
1347 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1348 } else if (ol_flags & PKT_TX_IPV6) {
1349 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1350 *td_offset |= (tx_offload.l3_len >> 2) <<
1351 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1354 if (ol_flags & PKT_TX_TCP_SEG) {
1355 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1356 *td_offset |= (tx_offload.l4_len >> 2) <<
1357 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1361 /* Enable L4 checksum offloads */
1362 switch (ol_flags & PKT_TX_L4_MASK) {
1363 case PKT_TX_TCP_CKSUM:
1364 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1365 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
1366 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1368 case PKT_TX_SCTP_CKSUM:
1369 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
1370 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
1371 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1373 case PKT_TX_UDP_CKSUM:
1374 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
1375 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
1376 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1383 /* set TSO context descriptor
1384 * support IP -> L4 and IP -> IP -> L4
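 *
 * Rough example with hypothetical values: a non-tunnelled TCP packet with
 * 54 bytes of L2/L3/L4 headers and pkt_len = 9000 gives
 * cd_tso_len = 9000 - 54 = 8946, while the MSS field carries tso_segsz.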
1386 static inline uint64_t
1387 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
1389 uint64_t ctx_desc = 0;
1390 uint32_t cd_cmd, hdr_len, cd_tso_len;
1392 if (!tx_offload.l4_len) {
1393 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1397 hdr_len = tx_offload.l2_len +
1401 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1402 cd_tso_len = mbuf->pkt_len - hdr_len;
1403 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1404 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1405 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1410 /* Construct the tx flags */
1411 static inline uint64_t
1412 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
1415 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
1416 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
1417 ((uint64_t)td_offset <<
1418 IAVF_TXD_QW1_OFFSET_SHIFT) |
1420 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
1421 ((uint64_t)td_tag <<
1422 IAVF_TXD_QW1_L2TAG1_SHIFT));
1427 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1429 volatile struct iavf_tx_desc *txd;
1430 volatile struct iavf_tx_desc *txr;
1431 struct iavf_tx_queue *txq;
1432 struct iavf_tx_entry *sw_ring;
1433 struct iavf_tx_entry *txe, *txn;
1434 struct rte_mbuf *tx_pkt;
1435 struct rte_mbuf *m_seg;
1446 uint64_t buf_dma_addr;
1447 union iavf_tx_offload tx_offload = {0};
1450 sw_ring = txq->sw_ring;
1452 tx_id = txq->tx_tail;
1453 txe = &sw_ring[tx_id];
1455 /* Check if the descriptor ring needs to be cleaned. */
1456 if (txq->nb_free < txq->free_thresh)
1457 iavf_xmit_cleanup(txq);
1459 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1464 tx_pkt = *tx_pkts++;
1465 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1467 ol_flags = tx_pkt->ol_flags;
1468 tx_offload.l2_len = tx_pkt->l2_len;
1469 tx_offload.l3_len = tx_pkt->l3_len;
1470 tx_offload.l4_len = tx_pkt->l4_len;
1471 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1473 /* Calculate the number of context descriptors needed. */
1474 nb_ctx = iavf_calc_context_desc(ol_flags);
1476 /* The number of descriptors that must be allocated for
1477 * a packet equals the number of segments of that
1478 * packet plus 1 context descriptor if needed.
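 *
 * e.g. a TSO packet made of 4 mbuf segments needs nb_used = 4 + 1 = 5
 * descriptors, while a plain single-mbuf packet needs just 1.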
1480 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1481 tx_last = (uint16_t)(tx_id + nb_used - 1);
1484 if (tx_last >= txq->nb_tx_desc)
1485 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1487 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
1488 " tx_first=%u tx_last=%u",
1489 txq->port_id, txq->queue_id, tx_id, tx_last);
1491 if (nb_used > txq->nb_free) {
1492 if (iavf_xmit_cleanup(txq)) {
1497 if (unlikely(nb_used > txq->rs_thresh)) {
1498 while (nb_used > txq->nb_free) {
1499 if (iavf_xmit_cleanup(txq)) {
1508 /* Descriptor based VLAN insertion */
1509 if (ol_flags & PKT_TX_VLAN_PKT) {
1510 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
1511 td_tag = tx_pkt->vlan_tci;
1514 /* According to datasheet, the bit2 is reserved and must be
1519 /* Enable checksum offloading */
1520 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
1521 iavf_txd_enable_checksum(ol_flags, &td_cmd,
1522 &td_offset, tx_offload);
1525 /* Setup TX context descriptor if required */
1526 uint64_t cd_type_cmd_tso_mss =
1527 IAVF_TX_DESC_DTYPE_CONTEXT;
1528 volatile struct iavf_tx_context_desc *ctx_txd =
1529 (volatile struct iavf_tx_context_desc *)
1532 txn = &sw_ring[txe->next_id];
1533 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1535 rte_pktmbuf_free_seg(txe->mbuf);
1540 if (ol_flags & PKT_TX_TCP_SEG)
1541 cd_type_cmd_tso_mss |=
1542 iavf_set_tso_ctx(tx_pkt, tx_offload);
1544 ctx_txd->type_cmd_tso_mss =
1545 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1547 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
1548 txe->last_id = tx_last;
1549 tx_id = txe->next_id;
1556 txn = &sw_ring[txe->next_id];
1559 rte_pktmbuf_free_seg(txe->mbuf);
1562 /* Setup TX Descriptor */
1563 slen = m_seg->data_len;
1564 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1565 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
1566 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
1571 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
1572 txe->last_id = tx_last;
1573 tx_id = txe->next_id;
1575 m_seg = m_seg->next;
1578 /* The last packet data descriptor needs End Of Packet (EOP) */
1579 td_cmd |= IAVF_TX_DESC_CMD_EOP;
1580 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
1581 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
1583 if (txq->nb_used >= txq->rs_thresh) {
1584 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
1585 "%4u (port=%d queue=%d)",
1586 tx_last, txq->port_id, txq->queue_id);
1588 td_cmd |= IAVF_TX_DESC_CMD_RS;
1590 /* Update txq RS bit counters */
1594 txd->cmd_type_offset_bsz |=
1595 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
1596 IAVF_TXD_QW1_CMD_SHIFT);
1597 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
1603 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1604 txq->port_id, txq->queue_id, tx_id, nb_tx);
1606 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
1607 txq->tx_tail = tx_id;
1612 /* TX prep functions */
1614 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1621 for (i = 0; i < nb_pkts; i++) {
1623 ol_flags = m->ol_flags;
1625 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
1626 if (!(ol_flags & PKT_TX_TCP_SEG)) {
1627 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
1631 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
1632 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
1633 /* An MSS outside this range is considered malicious */
1638 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
1639 rte_errno = ENOTSUP;
1643 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1644 ret = rte_validate_tx_offload(m);
1650 ret = rte_net_intel_cksum_prepare(m);
1660 /* Choose the Rx function */
1662 iavf_set_rx_function(struct rte_eth_dev *dev)
1664 struct iavf_adapter *adapter =
1665 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1667 struct iavf_rx_queue *rxq;
1669 bool use_avx2 = false;
1671 if (!iavf_rx_vec_dev_check(dev)) {
1672 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1673 rxq = dev->data->rx_queues[i];
1674 (void)iavf_rxq_vec_setup(rxq);
1677 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
1678 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
1681 if (dev->data->scattered_rx) {
1683 "Using %sVector Scattered Rx (port %d).",
1684 use_avx2 ? "avx2 " : "",
1685 dev->data->port_id);
1686 dev->rx_pkt_burst = use_avx2 ?
1687 iavf_recv_scattered_pkts_vec_avx2 :
1688 iavf_recv_scattered_pkts_vec;
1690 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
1691 use_avx2 ? "avx2 " : "",
1692 dev->data->port_id);
1693 dev->rx_pkt_burst = use_avx2 ?
1694 iavf_recv_pkts_vec_avx2 :
1702 if (dev->data->scattered_rx) {
1703 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
1704 dev->data->port_id);
1705 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
1706 } else if (adapter->rx_bulk_alloc_allowed) {
1707 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
1708 dev->data->port_id);
1709 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
1711 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
1712 dev->data->port_id);
1713 dev->rx_pkt_burst = iavf_recv_pkts;
1717 /* Choose the Tx function */
1719 iavf_set_tx_function(struct rte_eth_dev *dev)
1722 struct iavf_tx_queue *txq;
1724 bool use_avx2 = false;
1726 if (!iavf_tx_vec_dev_check(dev)) {
1727 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1728 txq = dev->data->tx_queues[i];
1731 iavf_txq_vec_setup(txq);
1734 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
1735 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
1738 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
1739 use_avx2 ? "avx2 " : "",
1740 dev->data->port_id);
1741 dev->tx_pkt_burst = use_avx2 ?
1742 iavf_xmit_pkts_vec_avx2 :
1744 dev->tx_pkt_prepare = NULL;
1750 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
1751 dev->data->port_id);
1752 dev->tx_pkt_burst = iavf_xmit_pkts;
1753 dev->tx_pkt_prepare = iavf_prep_pkts;
1757 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1758 struct rte_eth_rxq_info *qinfo)
1760 struct iavf_rx_queue *rxq;
1762 rxq = dev->data->rx_queues[queue_id];
1764 qinfo->mp = rxq->mp;
1765 qinfo->scattered_rx = dev->data->scattered_rx;
1766 qinfo->nb_desc = rxq->nb_rx_desc;
1768 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1769 qinfo->conf.rx_drop_en = true;
1770 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1774 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1775 struct rte_eth_txq_info *qinfo)
1777 struct iavf_tx_queue *txq;
1779 txq = dev->data->tx_queues[queue_id];
1781 qinfo->nb_desc = txq->nb_tx_desc;
1783 qinfo->conf.tx_free_thresh = txq->free_thresh;
1784 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
1785 qinfo->conf.offloads = txq->offloads;
1786 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1789 /* Get the number of used descriptors of a rx queue */
1791 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
1793 #define IAVF_RXQ_SCAN_INTERVAL 4
1794 volatile union iavf_rx_desc *rxdp;
1795 struct iavf_rx_queue *rxq;
1798 rxq = dev->data->rx_queues[queue_id];
1799 rxdp = &rxq->rx_ring[rxq->rx_tail];
1800 while ((desc < rxq->nb_rx_desc) &&
1801 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1802 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
1803 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
1804 /* Check the DD bit of one Rx descriptor out of every group of 4,
1805 * to avoid checking too frequently and degrading performance
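 *
 * e.g. on a 512-descriptor ring at most 512 / 4 = 128 descriptors are
 * read per call.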
1808 desc += IAVF_RXQ_SCAN_INTERVAL;
1809 rxdp += IAVF_RXQ_SCAN_INTERVAL;
1810 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1811 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1812 desc - rxq->nb_rx_desc]);
1819 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
1821 struct iavf_rx_queue *rxq = rx_queue;
1822 volatile uint64_t *status;
1826 if (unlikely(offset >= rxq->nb_rx_desc))
1829 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1830 return RTE_ETH_RX_DESC_UNAVAIL;
1832 desc = rxq->rx_tail + offset;
1833 if (desc >= rxq->nb_rx_desc)
1834 desc -= rxq->nb_rx_desc;
1836 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
1837 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
1838 << IAVF_RXD_QW1_STATUS_SHIFT);
1840 return RTE_ETH_RX_DESC_DONE;
1842 return RTE_ETH_RX_DESC_AVAIL;
1846 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
1848 struct iavf_tx_queue *txq = tx_queue;
1849 volatile uint64_t *status;
1850 uint64_t mask, expect;
1853 if (unlikely(offset >= txq->nb_tx_desc))
1856 desc = txq->tx_tail + offset;
1857 /* go to next desc that has the RS bit */
1858 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
1860 if (desc >= txq->nb_tx_desc) {
1861 desc -= txq->nb_tx_desc;
1862 if (desc >= txq->nb_tx_desc)
1863 desc -= txq->nb_tx_desc;
1866 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1867 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
1868 expect = rte_cpu_to_le_64(
1869 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
1870 if ((*status & mask) == expect)
1871 return RTE_ETH_TX_DESC_DONE;
1873 return RTE_ETH_TX_DESC_FULL;
1877 iavf_get_default_ptype_table(void)
1879 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
1880 __rte_cache_aligned = {
1883 [1] = RTE_PTYPE_L2_ETHER,
1884 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
1885 /* [3] - [5] reserved */
1886 [6] = RTE_PTYPE_L2_ETHER_LLDP,
1887 /* [7] - [10] reserved */
1888 [11] = RTE_PTYPE_L2_ETHER_ARP,
1889 /* [12] - [21] reserved */
1891 /* Non tunneled IPv4 */
1892 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1894 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1895 RTE_PTYPE_L4_NONFRAG,
1896 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1899 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1901 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1903 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1907 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1908 RTE_PTYPE_TUNNEL_IP |
1909 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1910 RTE_PTYPE_INNER_L4_FRAG,
1911 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1912 RTE_PTYPE_TUNNEL_IP |
1913 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1914 RTE_PTYPE_INNER_L4_NONFRAG,
1915 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1916 RTE_PTYPE_TUNNEL_IP |
1917 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1918 RTE_PTYPE_INNER_L4_UDP,
1920 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1921 RTE_PTYPE_TUNNEL_IP |
1922 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1923 RTE_PTYPE_INNER_L4_TCP,
1924 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1925 RTE_PTYPE_TUNNEL_IP |
1926 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1927 RTE_PTYPE_INNER_L4_SCTP,
1928 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1929 RTE_PTYPE_TUNNEL_IP |
1930 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1931 RTE_PTYPE_INNER_L4_ICMP,
1934 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1935 RTE_PTYPE_TUNNEL_IP |
1936 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1937 RTE_PTYPE_INNER_L4_FRAG,
1938 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1939 RTE_PTYPE_TUNNEL_IP |
1940 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1941 RTE_PTYPE_INNER_L4_NONFRAG,
1942 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1943 RTE_PTYPE_TUNNEL_IP |
1944 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1945 RTE_PTYPE_INNER_L4_UDP,
1947 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1948 RTE_PTYPE_TUNNEL_IP |
1949 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1950 RTE_PTYPE_INNER_L4_TCP,
1951 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1952 RTE_PTYPE_TUNNEL_IP |
1953 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1954 RTE_PTYPE_INNER_L4_SCTP,
1955 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1956 RTE_PTYPE_TUNNEL_IP |
1957 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1958 RTE_PTYPE_INNER_L4_ICMP,
1960 /* IPv4 --> GRE/Teredo/VXLAN */
1961 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1962 RTE_PTYPE_TUNNEL_GRENAT,
1964 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
1965 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1966 RTE_PTYPE_TUNNEL_GRENAT |
1967 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1968 RTE_PTYPE_INNER_L4_FRAG,
1969 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1970 RTE_PTYPE_TUNNEL_GRENAT |
1971 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1972 RTE_PTYPE_INNER_L4_NONFRAG,
1973 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1974 RTE_PTYPE_TUNNEL_GRENAT |
1975 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1976 RTE_PTYPE_INNER_L4_UDP,
1978 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1979 RTE_PTYPE_TUNNEL_GRENAT |
1980 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1981 RTE_PTYPE_INNER_L4_TCP,
1982 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1983 RTE_PTYPE_TUNNEL_GRENAT |
1984 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1985 RTE_PTYPE_INNER_L4_SCTP,
1986 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1987 RTE_PTYPE_TUNNEL_GRENAT |
1988 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1989 RTE_PTYPE_INNER_L4_ICMP,
1991 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
1992 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1993 RTE_PTYPE_TUNNEL_GRENAT |
1994 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1995 RTE_PTYPE_INNER_L4_FRAG,
1996 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1997 RTE_PTYPE_TUNNEL_GRENAT |
1998 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1999 RTE_PTYPE_INNER_L4_NONFRAG,
2000 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2001 RTE_PTYPE_TUNNEL_GRENAT |
2002 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2003 RTE_PTYPE_INNER_L4_UDP,
2005 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2006 RTE_PTYPE_TUNNEL_GRENAT |
2007 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2008 RTE_PTYPE_INNER_L4_TCP,
2009 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2010 RTE_PTYPE_TUNNEL_GRENAT |
2011 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2012 RTE_PTYPE_INNER_L4_SCTP,
2013 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2014 RTE_PTYPE_TUNNEL_GRENAT |
2015 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2016 RTE_PTYPE_INNER_L4_ICMP,
2018 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2019 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2020 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2022 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2023 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2024 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2025 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2026 RTE_PTYPE_INNER_L4_FRAG,
2027 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2028 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2029 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2030 RTE_PTYPE_INNER_L4_NONFRAG,
2031 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2032 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2033 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2034 RTE_PTYPE_INNER_L4_UDP,
2036 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2037 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2038 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2039 RTE_PTYPE_INNER_L4_TCP,
2040 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2041 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2042 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2043 RTE_PTYPE_INNER_L4_SCTP,
2044 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2045 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2046 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2047 RTE_PTYPE_INNER_L4_ICMP,
2049 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2050 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2051 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2052 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2053 RTE_PTYPE_INNER_L4_FRAG,
2054 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2055 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2056 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2057 RTE_PTYPE_INNER_L4_NONFRAG,
2058 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2059 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2060 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2061 RTE_PTYPE_INNER_L4_UDP,
2063 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2064 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2065 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2066 RTE_PTYPE_INNER_L4_TCP,
2067 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2068 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2069 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2070 RTE_PTYPE_INNER_L4_SCTP,
2071 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2072 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2073 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2074 RTE_PTYPE_INNER_L4_ICMP,
2075 /* [73] - [87] reserved */
2077 /* Non tunneled IPv6 */
2078 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2080 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2081 RTE_PTYPE_L4_NONFRAG,
2082 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2085 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2087 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2089 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2093 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2094 RTE_PTYPE_TUNNEL_IP |
2095 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2096 RTE_PTYPE_INNER_L4_FRAG,
2097 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2098 RTE_PTYPE_TUNNEL_IP |
2099 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2100 RTE_PTYPE_INNER_L4_NONFRAG,
2101 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2102 RTE_PTYPE_TUNNEL_IP |
2103 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2104 RTE_PTYPE_INNER_L4_UDP,
2106 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2107 RTE_PTYPE_TUNNEL_IP |
2108 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2109 RTE_PTYPE_INNER_L4_TCP,
2110 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2111 RTE_PTYPE_TUNNEL_IP |
2112 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2113 RTE_PTYPE_INNER_L4_SCTP,
2114 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2115 RTE_PTYPE_TUNNEL_IP |
2116 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2117 RTE_PTYPE_INNER_L4_ICMP,
2120 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2121 RTE_PTYPE_TUNNEL_IP |
2122 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2123 RTE_PTYPE_INNER_L4_FRAG,
2124 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2125 RTE_PTYPE_TUNNEL_IP |
2126 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_IP |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
/* [105] reserved */
[106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_IP |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_IP |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_SCTP,
[108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_IP |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,

/* IPv6 --> GRE/Teredo/VXLAN */
[109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT,

/* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
[110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_FRAG,
[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
/* [113] reserved */
[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_SCTP,
[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,

/* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_FRAG,
[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
/* [120] reserved */
[121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_SCTP,
[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,

/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,

/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_FRAG,
[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
/* [128] reserved */
[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_SCTP,
[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,

/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
[132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_FRAG,
[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
/* [135] reserved */
[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_SCTP,
[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,
/* [139] - [299] reserved */

/* PPPoE */
[300] = RTE_PTYPE_L2_ETHER_PPPOE,
[301] = RTE_PTYPE_L2_ETHER_PPPOE,

/* PPPoE --> IPv4 */
[302] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_FRAG,
[303] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_NONFRAG,
[304] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_UDP,
[305] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_TCP,
[306] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_SCTP,
[307] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_L4_ICMP,

/* PPPoE --> IPv6 */
[308] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_FRAG,
[309] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_NONFRAG,
[310] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_UDP,
[311] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_TCP,
[312] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_SCTP,
[313] = RTE_PTYPE_L2_ETHER_PPPOE |
	RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_L4_ICMP,
/* [314] - [324] reserved */

/* IPv4/IPv6 --> GTPC/GTPU */
[325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPC,
[326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPC,
[327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPC,
[328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPC,
[329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU,
[330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU,

/* IPv4 --> GTPU --> IPv4 */
[331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_FRAG,
[332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
[334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,

/* IPv6 --> GTPU --> IPv4 */
[336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_FRAG,
[337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
[339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,

/* IPv4 --> GTPU --> IPv6 */
[341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_FRAG,
[342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
[344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,

/* IPv6 --> GTPU --> IPv6 */
[346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_FRAG,
[347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_NONFRAG,
[348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_UDP,
[349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_TCP,
[350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_TUNNEL_GTPU |
	RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
	RTE_PTYPE_INNER_L4_ICMP,
/* All others reserved */
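
/* Usage sketch (variable names illustrative): the scalar Rx burst path
 * resolves each mbuf's packet_type by indexing a table like this one with
 * the 8-bit PTYPE field extracted from Rx descriptor qword1, roughly:
 *
 *   ptype = (qword1 & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
 *   mb->packet_type = ptype_tbl[ptype];
 *
 * Slots omitted from the initializer (the reserved indexes above) default
 * to zero, i.e. RTE_PTYPE_UNKNOWN.
 */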