1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
10 #define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
12 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
16 PKT_TX_OUTER_IP_CKSUM)
18 #define ICE_RX_ERR_BITS 0x3f
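/* Program the hardware context of one Rx queue: descriptor ring base
 * and length, buffer sizes and maximum packet length, then write the
 * context to hardware and initialize the queue tail register.
 */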
20 static enum ice_status
21 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
23 struct ice_vsi *vsi = rxq->vsi;
24 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
25 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
26 struct ice_rlan_ctx rx_ctx;
28 uint16_t buf_size, len;
29 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
33 * The kernel driver uses the flex descriptor format and sets the
34 * register to flex descriptor mode.
35 * DPDK uses the legacy descriptor format, so the register should be
36 * set back to its default value before using legacy descriptor mode.
38 regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
39 QRXFLXP_CNTXT_RXDID_PRIO_M;
40 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
42 /* Set buffer size as header split is disabled. */
43 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
44 RTE_PKTMBUF_HEADROOM);
46 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
47 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
48 rxq->max_pkt_len = RTE_MIN(len,
49 dev->data->dev_conf.rxmode.max_rx_pkt_len);
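/* The maximum packet length is capped by what a full chain of Rx
 * buffers can hold (ICE_SUPPORT_CHAIN_NUM buffers of rx_buf_len bytes).
 */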
51 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
52 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
53 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
54 PMD_DRV_LOG(ERR, "maximum packet length must "
55 "be larger than %u and smaller than %u, "
56 "as jumbo frame is enabled",
57 (uint32_t)ETHER_MAX_LEN,
58 (uint32_t)ICE_FRAME_SIZE_MAX);
62 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
63 rxq->max_pkt_len > ETHER_MAX_LEN) {
64 PMD_DRV_LOG(ERR, "maximum packet length must be "
65 "larger than %u and smaller than %u, "
66 "as jumbo frame is disabled",
67 (uint32_t)ETHER_MIN_LEN,
68 (uint32_t)ETHER_MAX_LEN);
73 memset(&rx_ctx, 0, sizeof(rx_ctx));
75 rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
76 rx_ctx.qlen = rxq->nb_rx_desc;
77 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
78 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
79 rx_ctx.dtype = 0; /* No Header Split mode */
80 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
81 rx_ctx.dsize = 1; /* 32B descriptors */
83 rx_ctx.rxmax = rxq->max_pkt_len;
84 /* TPH: Transaction Layer Packet (TLP) processing hints */
85 rx_ctx.tphrdesc_ena = 1;
86 rx_ctx.tphwdesc_ena = 1;
87 rx_ctx.tphdata_ena = 1;
88 rx_ctx.tphhead_ena = 1;
89 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
90 * When the number of free descriptors goes below the lrxqthresh,
91 * an immediate interrupt is triggered.
93 rx_ctx.lrxqthresh = 2;
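/* e.g. lrxqthresh = 2 means an immediate interrupt is triggered when
 * fewer than 2 * 64 = 128 free descriptors remain.
 */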
94 /* Default to 32-byte descriptors; VLAN tag is extracted to L2TAG2 (1st) */
98 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
100 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
104 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
106 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
111 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
112 RTE_PKTMBUF_HEADROOM);
114 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
116 /* Init the Rx tail register */
117 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
122 /* Allocate mbufs for all descriptors in rx queue */
124 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
126 struct ice_rx_entry *rxe = rxq->sw_ring;
130 for (i = 0; i < rxq->nb_rx_desc; i++) {
131 volatile union ice_rx_desc *rxd;
132 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
134 if (unlikely(!mbuf)) {
135 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
139 rte_mbuf_refcnt_set(mbuf, 1);
141 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
143 mbuf->port = rxq->port_id;
146 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
148 rxd = &rxq->rx_ring[i];
149 rxd->read.pkt_addr = dma_addr;
150 rxd->read.hdr_addr = 0;
151 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
161 /* Free all mbufs for descriptors in rx queue */
163 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
167 if (!rxq || !rxq->sw_ring) {
168 PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
172 for (i = 0; i < rxq->nb_rx_desc; i++) {
173 if (rxq->sw_ring[i].mbuf) {
174 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
175 rxq->sw_ring[i].mbuf = NULL;
178 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
179 if (rxq->rx_nb_avail == 0)
181 for (i = 0; i < rxq->rx_nb_avail; i++) {
182 struct rte_mbuf *mbuf;
184 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
185 rte_pktmbuf_free_seg(mbuf);
187 rxq->rx_nb_avail = 0;
188 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
191 /* turn on or off rx queue
192 * @q_idx: queue index in pf scope
193 * @on: turn on or off the queue
196 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
201 /* QRX_CTRL = QRX_ENA */
202 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
205 if (reg & QRX_CTRL_QENA_STAT_M)
206 return 0; /* Already on, skip */
207 reg |= QRX_CTRL_QENA_REQ_M;
209 if (!(reg & QRX_CTRL_QENA_STAT_M))
210 return 0; /* Already off, skip */
211 reg &= ~QRX_CTRL_QENA_REQ_M;
214 /* Write the register */
215 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
216 /* Check the result. It is said that QENA_STAT
217 * follows the QENA_REQ in not more than 10 us.
218 * TODO: need to change the wait counter later
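 * The loop below waits at most ICE_CHK_Q_ENA_COUNT *
 * ICE_CHK_Q_ENA_INTERVAL_US microseconds before a timeout is reported.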
220 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
221 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
222 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
224 if ((reg & QRX_CTRL_QENA_REQ_M) &&
225 (reg & QRX_CTRL_QENA_STAT_M))
228 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
229 !(reg & QRX_CTRL_QENA_STAT_M))
234 /* Check whether the wait timed out */
235 if (j >= ICE_CHK_Q_ENA_COUNT) {
236 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
237 (on ? "enable" : "disable"), q_idx);
245 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
246 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
248 ice_check_rx_burst_bulk_alloc_preconditions
249 (__rte_unused struct ice_rx_queue *rxq)
254 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
255 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
256 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
257 "rxq->rx_free_thresh=%d, "
258 "ICE_RX_MAX_BURST=%d",
259 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
261 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
262 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
263 "rxq->rx_free_thresh=%d, "
264 "rxq->nb_rx_desc=%d",
265 rxq->rx_free_thresh, rxq->nb_rx_desc);
267 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
268 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
269 "rxq->nb_rx_desc=%d, "
270 "rxq->rx_free_thresh=%d",
271 rxq->nb_rx_desc, rxq->rx_free_thresh);
281 /* reset fields in ice_rx_queue back to default */
283 ice_reset_rx_queue(struct ice_rx_queue *rxq)
289 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
293 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
294 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
295 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
297 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
298 len = rxq->nb_rx_desc;
300 for (i = 0; i < len * sizeof(union ice_rx_desc); i++)
301 ((volatile char *)rxq->rx_ring)[i] = 0;
303 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
304 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
305 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
306 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
308 rxq->rx_nb_avail = 0;
309 rxq->rx_next_avail = 0;
310 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
311 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
315 rxq->pkt_first_seg = NULL;
316 rxq->pkt_last_seg = NULL;
320 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
322 struct ice_rx_queue *rxq;
324 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
326 PMD_INIT_FUNC_TRACE();
328 if (rx_queue_id >= dev->data->nb_rx_queues) {
329 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
330 rx_queue_id, dev->data->nb_rx_queues);
334 rxq = dev->data->rx_queues[rx_queue_id];
335 if (!rxq || !rxq->q_set) {
336 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
341 err = ice_program_hw_rx_queue(rxq);
343 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
348 err = ice_alloc_rx_queue_mbufs(rxq);
350 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
356 /* Init the RX tail register. */
357 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
359 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
361 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
364 ice_rx_queue_release_mbufs(rxq);
365 ice_reset_rx_queue(rxq);
369 dev->data->rx_queue_state[rx_queue_id] =
370 RTE_ETH_QUEUE_STATE_STARTED;
376 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
378 struct ice_rx_queue *rxq;
380 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
382 if (rx_queue_id < dev->data->nb_rx_queues) {
383 rxq = dev->data->rx_queues[rx_queue_id];
385 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
387 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
391 ice_rx_queue_release_mbufs(rxq);
392 ice_reset_rx_queue(rxq);
393 dev->data->rx_queue_state[rx_queue_id] =
394 RTE_ETH_QUEUE_STATE_STOPPED;
401 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
403 struct ice_tx_queue *txq;
407 struct ice_aqc_add_tx_qgrp txq_elem;
408 struct ice_tlan_ctx tx_ctx;
410 PMD_INIT_FUNC_TRACE();
412 if (tx_queue_id >= dev->data->nb_tx_queues) {
413 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
414 tx_queue_id, dev->data->nb_tx_queues);
418 txq = dev->data->tx_queues[tx_queue_id];
419 if (!txq || !txq->q_set) {
420 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
426 hw = ICE_VSI_TO_HW(vsi);
428 memset(&txq_elem, 0, sizeof(txq_elem));
429 memset(&tx_ctx, 0, sizeof(tx_ctx));
430 txq_elem.num_txqs = 1;
431 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
433 tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
434 tx_ctx.qlen = txq->nb_tx_desc;
435 tx_ctx.pf_num = hw->pf_id;
436 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
437 tx_ctx.src_vsi = vsi->vsi_id;
438 tx_ctx.port_num = hw->port_info->lport;
439 tx_ctx.tso_ena = 1; /* tso enable */
440 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
441 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
443 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
446 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
448 /* Init the Tx tail register */
449 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
451 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
452 sizeof(txq_elem), NULL);
454 PMD_DRV_LOG(ERR, "Failed to add lan txq");
457 /* store the schedule node id */
458 txq->q_teid = txq_elem.txqs[0].q_teid;
460 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
464 /* Free all mbufs for descriptors in tx queue */
466 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
470 if (!txq || !txq->sw_ring) {
471 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
475 for (i = 0; i < txq->nb_tx_desc; i++) {
476 if (txq->sw_ring[i].mbuf) {
477 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
478 txq->sw_ring[i].mbuf = NULL;
484 ice_reset_tx_queue(struct ice_tx_queue *txq)
486 struct ice_tx_entry *txe;
487 uint16_t i, prev, size;
490 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
495 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
496 for (i = 0; i < size; i++)
497 ((volatile char *)txq->tx_ring)[i] = 0;
499 prev = (uint16_t)(txq->nb_tx_desc - 1);
500 for (i = 0; i < txq->nb_tx_desc; i++) {
501 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
503 txd->cmd_type_offset_bsz =
504 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
507 txe[prev].next_id = i;
511 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
512 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
517 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
518 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
522 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
524 struct ice_tx_queue *txq;
525 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
526 enum ice_status status;
530 if (tx_queue_id >= dev->data->nb_tx_queues) {
531 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
532 tx_queue_id, dev->data->nb_tx_queues);
536 txq = dev->data->tx_queues[tx_queue_id];
538 PMD_DRV_LOG(ERR, "TX queue %u is not available",
543 q_ids[0] = txq->reg_idx;
544 q_teids[0] = txq->q_teid;
546 status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
547 ICE_NO_RESET, 0, NULL);
548 if (status != ICE_SUCCESS) {
549 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
553 ice_tx_queue_release_mbufs(txq);
554 ice_reset_tx_queue(txq);
555 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
561 ice_rx_queue_setup(struct rte_eth_dev *dev,
564 unsigned int socket_id,
565 const struct rte_eth_rxconf *rx_conf,
566 struct rte_mempool *mp)
568 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
569 struct ice_adapter *ad =
570 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
571 struct ice_vsi *vsi = pf->main_vsi;
572 struct ice_rx_queue *rxq;
573 const struct rte_memzone *rz;
576 int use_def_burst_func = 1;
578 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
579 nb_desc > ICE_MAX_RING_DESC ||
580 nb_desc < ICE_MIN_RING_DESC) {
581 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
586 /* Free memory if needed */
587 if (dev->data->rx_queues[queue_idx]) {
588 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
589 dev->data->rx_queues[queue_idx] = NULL;
592 /* Allocate the rx queue data structure */
593 rxq = rte_zmalloc_socket(NULL,
594 sizeof(struct ice_rx_queue),
598 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
599 "rx queue data structure");
603 rxq->nb_rx_desc = nb_desc;
604 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
605 rxq->queue_id = queue_idx;
607 rxq->reg_idx = vsi->base_queue + queue_idx;
608 rxq->port_id = dev->data->port_id;
609 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
610 rxq->crc_len = ETHER_CRC_LEN;
614 rxq->drop_en = rx_conf->rx_drop_en;
616 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
618 /* Allocate the maximum number of RX ring hardware descriptors. */
619 len = ICE_MAX_RING_DESC;
621 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
623 * Allocate a little more memory because the vectorized/bulk_alloc Rx
624 * functions don't check boundaries each time.
626 len += ICE_RX_MAX_BURST;
629 /* Allocate the maximum number of RX ring hardware descriptors. */
630 ring_size = sizeof(union ice_rx_desc) * len;
631 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
632 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
633 ring_size, ICE_RING_BASE_ALIGN,
636 ice_rx_queue_release(rxq);
637 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
641 /* Zero all the descriptors in the ring. */
642 memset(rz->addr, 0, ring_size);
644 rxq->rx_ring_phys_addr = rz->phys_addr;
645 rxq->rx_ring = (union ice_rx_desc *)rz->addr;
647 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
648 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
653 /* Allocate the software ring. */
654 rxq->sw_ring = rte_zmalloc_socket(NULL,
655 sizeof(struct ice_rx_entry) * len,
659 ice_rx_queue_release(rxq);
660 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
664 ice_reset_rx_queue(rxq);
666 dev->data->rx_queues[queue_idx] = rxq;
668 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
670 if (!use_def_burst_func) {
671 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
672 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
673 "satisfied. Rx Burst Bulk Alloc function will be "
674 "used on port=%d, queue=%d.",
675 rxq->port_id, rxq->queue_id);
676 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
678 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
679 "not satisfied, Scattered Rx is requested, "
680 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
681 "not enabled on port=%d, queue=%d.",
682 rxq->port_id, rxq->queue_id);
683 ad->rx_bulk_alloc_allowed = false;
690 ice_rx_queue_release(void *rxq)
692 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
695 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
699 ice_rx_queue_release_mbufs(q);
700 rte_free(q->sw_ring);
705 ice_tx_queue_setup(struct rte_eth_dev *dev,
708 unsigned int socket_id,
709 const struct rte_eth_txconf *tx_conf)
711 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
712 struct ice_vsi *vsi = pf->main_vsi;
713 struct ice_tx_queue *txq;
714 const struct rte_memzone *tz;
716 uint16_t tx_rs_thresh, tx_free_thresh;
719 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
721 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
722 nb_desc > ICE_MAX_RING_DESC ||
723 nb_desc < ICE_MIN_RING_DESC) {
724 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
730 * The following two parameters control the setting of the RS bit on
731 * transmit descriptors. TX descriptors will have their RS bit set
732 * after txq->tx_rs_thresh descriptors have been used. The TX
733 * descriptor ring will be cleaned after txq->tx_free_thresh
734 * descriptors are used or if the number of descriptors required to
735 * transmit a packet is greater than the number of free TX descriptors.
737 * The following constraints must be satisfied:
738 * - tx_rs_thresh must be greater than 0.
739 * - tx_rs_thresh must be less than the size of the ring minus 2.
740 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
741 * - tx_rs_thresh must be a divisor of the ring size.
742 * - tx_free_thresh must be greater than 0.
743 * - tx_free_thresh must be less than the size of the ring minus 3.
745 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
746 * race condition, hence the maximum threshold constraints. When set
747 * to zero, the default values are used.
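 * Example (one valid setting, not the only one): with nb_desc = 1024,
 * tx_rs_thresh = 32 and tx_free_thresh = 32 satisfy all of the above,
 * since 32 divides 1024, 32 <= 32, and 32 < 1024 - 3.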
749 tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
750 tx_conf->tx_rs_thresh :
751 ICE_DEFAULT_TX_RSBIT_THRESH);
752 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
753 tx_conf->tx_free_thresh :
754 ICE_DEFAULT_TX_FREE_THRESH);
755 if (tx_rs_thresh >= (nb_desc - 2)) {
756 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
757 "number of TX descriptors minus 2. "
758 "(tx_rs_thresh=%u port=%d queue=%d)",
759 (unsigned int)tx_rs_thresh,
760 (int)dev->data->port_id,
764 if (tx_free_thresh >= (nb_desc - 3)) {
765 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
766 "number of TX descriptors minus 3. "
768 "(tx_free_thresh=%u port=%d queue=%d)",
769 (unsigned int)tx_free_thresh,
770 (int)dev->data->port_id,
774 if (tx_rs_thresh > tx_free_thresh) {
775 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
776 "equal to tx_free_thresh. (tx_free_thresh=%u"
777 " tx_rs_thresh=%u port=%d queue=%d)",
778 (unsigned int)tx_free_thresh,
779 (unsigned int)tx_rs_thresh,
780 (int)dev->data->port_id,
784 if ((nb_desc % tx_rs_thresh) != 0) {
785 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
786 "number of TX descriptors. (tx_rs_thresh=%u"
787 " port=%d queue=%d)",
788 (unsigned int)tx_rs_thresh,
789 (int)dev->data->port_id,
793 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
794 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
795 "tx_rs_thresh is greater than 1. "
796 "(tx_rs_thresh=%u port=%d queue=%d)",
797 (unsigned int)tx_rs_thresh,
798 (int)dev->data->port_id,
803 /* Free memory if needed. */
804 if (dev->data->tx_queues[queue_idx]) {
805 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
806 dev->data->tx_queues[queue_idx] = NULL;
809 /* Allocate the TX queue data structure. */
810 txq = rte_zmalloc_socket(NULL,
811 sizeof(struct ice_tx_queue),
815 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
816 "tx queue structure");
820 /* Allocate TX hardware ring descriptors. */
821 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
822 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
823 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
824 ring_size, ICE_RING_BASE_ALIGN,
827 ice_tx_queue_release(txq);
828 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
832 txq->nb_tx_desc = nb_desc;
833 txq->tx_rs_thresh = tx_rs_thresh;
834 txq->tx_free_thresh = tx_free_thresh;
835 txq->pthresh = tx_conf->tx_thresh.pthresh;
836 txq->hthresh = tx_conf->tx_thresh.hthresh;
837 txq->wthresh = tx_conf->tx_thresh.wthresh;
838 txq->queue_id = queue_idx;
840 txq->reg_idx = vsi->base_queue + queue_idx;
841 txq->port_id = dev->data->port_id;
842 txq->offloads = offloads;
844 txq->tx_deferred_start = tx_conf->tx_deferred_start;
846 txq->tx_ring_phys_addr = tz->phys_addr;
847 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
849 /* Allocate software ring */
851 rte_zmalloc_socket(NULL,
852 sizeof(struct ice_tx_entry) * nb_desc,
856 ice_tx_queue_release(txq);
857 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
861 ice_reset_tx_queue(txq);
863 dev->data->tx_queues[queue_idx] = txq;
869 ice_tx_queue_release(void *txq)
871 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
874 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
878 ice_tx_queue_release_mbufs(q);
879 rte_free(q->sw_ring);
884 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
885 struct rte_eth_rxq_info *qinfo)
887 struct ice_rx_queue *rxq;
889 rxq = dev->data->rx_queues[queue_id];
892 qinfo->scattered_rx = dev->data->scattered_rx;
893 qinfo->nb_desc = rxq->nb_rx_desc;
895 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
896 qinfo->conf.rx_drop_en = rxq->drop_en;
897 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
901 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
902 struct rte_eth_txq_info *qinfo)
904 struct ice_tx_queue *txq;
906 txq = dev->data->tx_queues[queue_id];
908 qinfo->nb_desc = txq->nb_tx_desc;
910 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
911 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
912 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
914 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
915 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
916 qinfo->conf.offloads = txq->offloads;
917 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
921 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
923 #define ICE_RXQ_SCAN_INTERVAL 4
924 volatile union ice_rx_desc *rxdp;
925 struct ice_rx_queue *rxq;
928 rxq = dev->data->rx_queues[rx_queue_id];
929 rxdp = &rxq->rx_ring[rxq->rx_tail];
930 while ((desc < rxq->nb_rx_desc) &&
931 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
932 ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
933 (1 << ICE_RX_DESC_STATUS_DD_S)) {
935 * Check the DD bit of one Rx descriptor in every group of 4,
936 * to avoid checking too frequently and degrading performance.
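 * (the scan interval is the ICE_RXQ_SCAN_INTERVAL macro defined above).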
939 desc += ICE_RXQ_SCAN_INTERVAL;
940 rxdp += ICE_RXQ_SCAN_INTERVAL;
941 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
942 rxdp = &(rxq->rx_ring[rxq->rx_tail +
943 desc - rxq->nb_rx_desc]);
949 /* Rx L3/L4 checksum */
950 static inline uint64_t
951 ice_rxd_error_to_pkt_flags(uint64_t qword)
954 uint64_t error_bits = (qword >> ICE_RXD_QW1_ERROR_S);
956 if (likely((error_bits & ICE_RX_ERR_BITS) == 0)) {
957 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
961 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_IPE_S)))
962 flags |= PKT_RX_IP_CKSUM_BAD;
964 flags |= PKT_RX_IP_CKSUM_GOOD;
966 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_L4E_S)))
967 flags |= PKT_RX_L4_CKSUM_BAD;
969 flags |= PKT_RX_L4_CKSUM_GOOD;
971 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_EIPE_S)))
972 flags |= PKT_RX_EIP_CKSUM_BAD;
978 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
980 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
981 (1 << ICE_RX_DESC_STATUS_L2TAG1P_S)) {
982 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
984 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
985 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
986 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
991 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
992 if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
993 (1 << ICE_RX_DESC_EXT_STATUS_L2TAG2P_S)) {
994 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
995 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
996 mb->vlan_tci_outer = mb->vlan_tci;
997 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
998 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
999 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
1000 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
1002 mb->vlan_tci_outer = 0;
1005 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1006 mb->vlan_tci, mb->vlan_tci_outer);
1010 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1012 static const uint32_t ptypes[] = {
1013 /* refers to ice_get_default_pkt_type() */
1015 RTE_PTYPE_L2_ETHER_LLDP,
1016 RTE_PTYPE_L2_ETHER_ARP,
1017 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1018 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1021 RTE_PTYPE_L4_NONFRAG,
1025 RTE_PTYPE_TUNNEL_GRENAT,
1026 RTE_PTYPE_TUNNEL_IP,
1027 RTE_PTYPE_INNER_L2_ETHER,
1028 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1029 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1030 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1031 RTE_PTYPE_INNER_L4_FRAG,
1032 RTE_PTYPE_INNER_L4_ICMP,
1033 RTE_PTYPE_INNER_L4_NONFRAG,
1034 RTE_PTYPE_INNER_L4_SCTP,
1035 RTE_PTYPE_INNER_L4_TCP,
1036 RTE_PTYPE_INNER_L4_UDP,
1037 RTE_PTYPE_TUNNEL_GTPC,
1038 RTE_PTYPE_TUNNEL_GTPU,
1042 if (dev->rx_pkt_burst == ice_recv_pkts)
1048 ice_clear_queues(struct rte_eth_dev *dev)
1052 PMD_INIT_FUNC_TRACE();
1054 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1055 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
1056 ice_reset_tx_queue(dev->data->tx_queues[i]);
1059 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1060 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
1061 ice_reset_rx_queue(dev->data->rx_queues[i]);
1066 ice_free_queues(struct rte_eth_dev *dev)
1070 PMD_INIT_FUNC_TRACE();
1072 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1073 if (!dev->data->rx_queues[i])
1075 ice_rx_queue_release(dev->data->rx_queues[i]);
1076 dev->data->rx_queues[i] = NULL;
1078 dev->data->nb_rx_queues = 0;
1080 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1081 if (!dev->data->tx_queues[i])
1083 ice_tx_queue_release(dev->data->tx_queues[i]);
1084 dev->data->tx_queues[i] = NULL;
1086 dev->data->nb_tx_queues = 0;
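/* Scalar receive routine. Each packet is assumed to fit in a single
 * mbuf (no scattered Rx here); one descriptor is consumed per packet.
 */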
1090 ice_recv_pkts(void *rx_queue,
1091 struct rte_mbuf **rx_pkts,
1094 struct ice_rx_queue *rxq = rx_queue;
1095 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1096 volatile union ice_rx_desc *rxdp;
1097 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1098 struct ice_rx_entry *rxe;
1099 struct rte_mbuf *nmb; /* newly allocated mbuf */
1100 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1101 uint16_t rx_id = rxq->rx_tail;
1103 uint16_t nb_hold = 0;
1104 uint16_t rx_packet_len;
1108 uint64_t pkt_flags = 0;
1109 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1110 struct rte_eth_dev *dev;
1112 while (nb_rx < nb_pkts) {
1113 rxdp = &rx_ring[rx_id];
1114 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1115 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1116 ICE_RXD_QW1_STATUS_S;
1118 /* Check the DD bit first */
1119 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1123 nmb = rte_mbuf_raw_alloc(rxq->mp);
1124 if (unlikely(!nmb)) {
1125 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1126 dev->data->rx_mbuf_alloc_failed++;
1131 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1133 if (unlikely(rx_id == rxq->nb_rx_desc))
1138 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1141 * fill the read format of the descriptor with the physical address
1142 * of the newly allocated mbuf: nmb
1144 rxdp->read.hdr_addr = 0;
1145 rxdp->read.pkt_addr = dma_addr;
1147 /* calculate rx_packet_len of the received pkt */
1148 rx_packet_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1149 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1151 /* fill old mbuf with received descriptor: rxd */
1152 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1153 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1156 rxm->pkt_len = rx_packet_len;
1157 rxm->data_len = rx_packet_len;
1158 rxm->port = rxq->port_id;
1159 ice_rxd_to_vlan_tci(rxm, rxdp);
1160 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1161 ICE_RXD_QW1_PTYPE_M) >>
1162 ICE_RXD_QW1_PTYPE_S)];
1163 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1164 rxm->ol_flags |= pkt_flags;
1165 /* copy old mbuf to rx_pkts */
1166 rx_pkts[nb_rx++] = rxm;
1168 rxq->rx_tail = rx_id;
1170 * If the number of free RX descriptors is greater than the RX free
1171 * threshold of the queue, advance the receive tail register of the
1172 * queue. Update that register with the value of the last processed RX
1173 * descriptor minus 1.
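 * e.g. with rx_free_thresh = 32, the tail register is written at most
 * once per ~32 processed descriptors, batching the MMIO writes.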
1175 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1176 if (nb_hold > rxq->rx_free_thresh) {
1177 rx_id = (uint16_t)(rx_id == 0 ?
1178 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1179 /* write TAIL register */
1180 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1183 rxq->nb_rx_hold = nb_hold;
1185 /* return the number of packets received in the burst */
1190 ice_txd_enable_checksum(uint64_t ol_flags,
1192 uint32_t *td_offset,
1193 union ice_tx_offload tx_offload)
1195 /* L2 length must be set. */
1196 *td_offset |= (tx_offload.l2_len >> 1) <<
1197 ICE_TX_DESC_LEN_MACLEN_S;
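/* Note: the length fields written above and below are in word units;
 * the >> 1 assumes MACLEN is counted in 2-byte words and the >> 2
 * shifts assume IPLEN/L4LEN are counted in 4-byte words.
 */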
1199 /* Enable L3 checksum offloads */
1200 if (ol_flags & PKT_TX_IP_CKSUM) {
1201 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1202 *td_offset |= (tx_offload.l3_len >> 2) <<
1203 ICE_TX_DESC_LEN_IPLEN_S;
1204 } else if (ol_flags & PKT_TX_IPV4) {
1205 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1206 *td_offset |= (tx_offload.l3_len >> 2) <<
1207 ICE_TX_DESC_LEN_IPLEN_S;
1208 } else if (ol_flags & PKT_TX_IPV6) {
1209 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1210 *td_offset |= (tx_offload.l3_len >> 2) <<
1211 ICE_TX_DESC_LEN_IPLEN_S;
1214 if (ol_flags & PKT_TX_TCP_SEG) {
1215 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1216 *td_offset |= (tx_offload.l4_len >> 2) <<
1217 ICE_TX_DESC_LEN_L4_LEN_S;
1221 /* Enable L4 checksum offloads */
1222 switch (ol_flags & PKT_TX_L4_MASK) {
1223 case PKT_TX_TCP_CKSUM:
1224 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1225 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
1226 ICE_TX_DESC_LEN_L4_LEN_S;
1228 case PKT_TX_SCTP_CKSUM:
1229 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1230 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
1231 ICE_TX_DESC_LEN_L4_LEN_S;
1233 case PKT_TX_UDP_CKSUM:
1234 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1235 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
1236 ICE_TX_DESC_LEN_L4_LEN_S;
1244 ice_xmit_cleanup(struct ice_tx_queue *txq)
1246 struct ice_tx_entry *sw_ring = txq->sw_ring;
1247 volatile struct ice_tx_desc *txd = txq->tx_ring;
1248 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1249 uint16_t nb_tx_desc = txq->nb_tx_desc;
1250 uint16_t desc_to_clean_to;
1251 uint16_t nb_tx_to_clean;
1253 /* Determine the last descriptor needing to be cleaned */
1254 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
1255 if (desc_to_clean_to >= nb_tx_desc)
1256 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1258 /* Check to make sure the last descriptor to clean is done */
1259 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1260 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
1261 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
1262 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1263 "(port=%d queue=%d) value=0x%"PRIx64"\n",
1265 txq->port_id, txq->queue_id,
1266 txd[desc_to_clean_to].cmd_type_offset_bsz);
1267 /* Failed to clean any descriptors */
1271 /* Figure out how many descriptors will be cleaned */
1272 if (last_desc_cleaned > desc_to_clean_to)
1273 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1276 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1279 /* The last descriptor to clean is done, so that means all the
1280 * descriptors from the last descriptor that was cleaned
1281 * up to the last descriptor with the RS bit set
1282 * are done. Only reset the threshold descriptor.
1284 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1286 /* Update the txq to reflect the last descriptor that was cleaned */
1287 txq->last_desc_cleaned = desc_to_clean_to;
1288 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
1293 /* Check if the context descriptor is needed for TX offloading */
1294 static inline uint16_t
1295 ice_calc_context_desc(uint64_t flags)
1297 static uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_QINQ;
1299 return (flags & mask) ? 1 : 0;
1302 /* set ice TSO context descriptor */
1303 static inline uint64_t
1304 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
1306 uint64_t ctx_desc = 0;
1307 uint32_t cd_cmd, hdr_len, cd_tso_len;
1309 if (!tx_offload.l4_len) {
1310 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1315 * In the case of a non-tunneled packet, the outer_l2_len and
1316 * outer_l3_len must be 0.
1318 hdr_len = tx_offload.outer_l2_len +
1319 tx_offload.outer_l3_len +
1324 cd_cmd = ICE_TX_CTX_DESC_TSO;
1325 cd_tso_len = mbuf->pkt_len - hdr_len;
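/* Illustrative example (hypothetical values): l2_len = 14, l3_len = 20,
 * l4_len = 20 and pkt_len = 9014 give hdr_len = 54 and a TSO payload
 * length (cd_tso_len) of 8960 bytes.
 */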
1326 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
1327 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1328 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
1334 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1336 struct ice_tx_queue *txq;
1337 volatile struct ice_tx_desc *tx_ring;
1338 volatile struct ice_tx_desc *txd;
1339 struct ice_tx_entry *sw_ring;
1340 struct ice_tx_entry *txe, *txn;
1341 struct rte_mbuf *tx_pkt;
1342 struct rte_mbuf *m_seg;
1347 uint32_t td_cmd = 0;
1348 uint32_t td_offset = 0;
1349 uint32_t td_tag = 0;
1351 uint64_t buf_dma_addr;
1353 union ice_tx_offload tx_offload = {0};
1356 sw_ring = txq->sw_ring;
1357 tx_ring = txq->tx_ring;
1358 tx_id = txq->tx_tail;
1359 txe = &sw_ring[tx_id];
1361 /* Check if the descriptor ring needs to be cleaned. */
1362 if (txq->nb_tx_free < txq->tx_free_thresh)
1363 ice_xmit_cleanup(txq);
1365 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1366 tx_pkt = *tx_pkts++;
1369 ol_flags = tx_pkt->ol_flags;
1370 tx_offload.l2_len = tx_pkt->l2_len;
1371 tx_offload.l3_len = tx_pkt->l3_len;
1372 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1373 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1374 tx_offload.l4_len = tx_pkt->l4_len;
1375 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1376 /* Calculate the number of context descriptors needed. */
1377 nb_ctx = ice_calc_context_desc(ol_flags);
1379 /* The number of descriptors that must be allocated for
1380 * a packet equals the number of segments of that packet
1381 * plus one context descriptor, if needed.
1383 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1384 tx_last = (uint16_t)(tx_id + nb_used - 1);
1387 if (tx_last >= txq->nb_tx_desc)
1388 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
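/* e.g. a 3-segment packet that also needs a context descriptor gives
 * nb_used = 4; tx_last wraps around modulo the ring size.
 */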
1390 if (nb_used > txq->nb_tx_free) {
1391 if (ice_xmit_cleanup(txq) != 0) {
1396 if (unlikely(nb_used > txq->tx_rs_thresh)) {
1397 while (nb_used > txq->nb_tx_free) {
1398 if (ice_xmit_cleanup(txq) != 0) {
1407 /* Descriptor based VLAN insertion */
1408 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
1409 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
1410 td_tag = tx_pkt->vlan_tci;
1413 /* Enable checksum offloading */
1414 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
1415 ice_txd_enable_checksum(ol_flags, &td_cmd,
1416 &td_offset, tx_offload);
1420 /* Setup TX context descriptor if required */
1421 volatile struct ice_tx_ctx_desc *ctx_txd =
1422 (volatile struct ice_tx_ctx_desc *)
1424 uint16_t cd_l2tag2 = 0;
1425 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
1427 txn = &sw_ring[txe->next_id];
1428 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1430 rte_pktmbuf_free_seg(txe->mbuf);
1434 if (ol_flags & PKT_TX_TCP_SEG)
1435 cd_type_cmd_tso_mss |=
1436 ice_set_tso_ctx(tx_pkt, tx_offload);
1438 /* TX context descriptor based double VLAN insert */
1439 if (ol_flags & PKT_TX_QINQ) {
1440 cd_l2tag2 = tx_pkt->vlan_tci_outer;
1441 cd_type_cmd_tso_mss |=
1442 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
1443 ICE_TXD_CTX_QW1_CMD_S);
1445 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
1447 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1449 txe->last_id = tx_last;
1450 tx_id = txe->next_id;
1456 txd = &tx_ring[tx_id];
1457 txn = &sw_ring[txe->next_id];
1460 rte_pktmbuf_free_seg(txe->mbuf);
1463 /* Setup TX Descriptor */
1464 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1465 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
1466 txd->cmd_type_offset_bsz =
1467 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
1468 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
1469 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
1470 ((uint64_t)m_seg->data_len <<
1471 ICE_TXD_QW1_TX_BUF_SZ_S) |
1472 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
1474 txe->last_id = tx_last;
1475 tx_id = txe->next_id;
1477 m_seg = m_seg->next;
1480 /* fill the last descriptor with End of Packet (EOP) bit */
1481 td_cmd |= ICE_TX_DESC_CMD_EOP;
1482 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
1483 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
1485 /* set RS bit on the last descriptor of one packet */
1486 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
1487 PMD_TX_FREE_LOG(DEBUG,
1488 "Setting RS bit on TXD id="
1489 "%4u (port=%d queue=%d)",
1490 tx_last, txq->port_id, txq->queue_id);
1492 td_cmd |= ICE_TX_DESC_CMD_RS;
1494 /* Update txq RS bit counters */
1495 txq->nb_tx_used = 0;
1497 txd->cmd_type_offset_bsz |=
1498 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
1504 /* update Tail register */
1505 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
1506 txq->tx_tail = tx_id;
1511 void __attribute__((cold))
1512 ice_set_rx_function(struct rte_eth_dev *dev)
1514 dev->rx_pkt_burst = ice_recv_pkts;
1517 /*********************************************************************
1521 **********************************************************************/
1522 /* Valid range of TSO MSS and maximum TSO frame size */
1523 #define ICE_MIN_TSO_MSS 64
1524 #define ICE_MAX_TSO_MSS 9728
1525 #define ICE_MAX_TSO_FRAME_SIZE 262144
1527 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1534 for (i = 0; i < nb_pkts; i++) {
1536 ol_flags = m->ol_flags;
1538 if (ol_flags & PKT_TX_TCP_SEG &&
1539 (m->tso_segsz < ICE_MIN_TSO_MSS ||
1540 m->tso_segsz > ICE_MAX_TSO_MSS ||
1541 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
1543 * MSS outside of the supported range is considered malicious
1545 rte_errno = EINVAL;
1549 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1550 ret = rte_validate_tx_offload(m);
1556 ret = rte_net_intel_cksum_prepare(m);
1565 void __attribute__((cold))
1566 ice_set_tx_function(struct rte_eth_dev *dev)
1568 dev->tx_pkt_burst = ice_xmit_pkts;
1569 dev->tx_pkt_prepare = ice_prep_pkts;
1572 /* For the meaning of each value, the hardware datasheet gives more details.
1574 * @note: fix ice_dev_supported_ptypes_get() if anything changes here.
1576 static inline uint32_t
1577 ice_get_default_pkt_type(uint16_t ptype)
1579 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
1580 __rte_cache_aligned = {
1583 [1] = RTE_PTYPE_L2_ETHER,
1584 /* [2] - [5] reserved */
1585 [6] = RTE_PTYPE_L2_ETHER_LLDP,
1586 /* [7] - [10] reserved */
1587 [11] = RTE_PTYPE_L2_ETHER_ARP,
1588 /* [12] - [21] reserved */
1590 /* Non tunneled IPv4 */
1591 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1593 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1594 RTE_PTYPE_L4_NONFRAG,
1595 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1598 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1600 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1602 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1606 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1607 RTE_PTYPE_TUNNEL_IP |
1608 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1609 RTE_PTYPE_INNER_L4_FRAG,
1610 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1611 RTE_PTYPE_TUNNEL_IP |
1612 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1613 RTE_PTYPE_INNER_L4_NONFRAG,
1614 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1615 RTE_PTYPE_TUNNEL_IP |
1616 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1617 RTE_PTYPE_INNER_L4_UDP,
1619 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1620 RTE_PTYPE_TUNNEL_IP |
1621 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1622 RTE_PTYPE_INNER_L4_TCP,
1623 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1624 RTE_PTYPE_TUNNEL_IP |
1625 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1626 RTE_PTYPE_INNER_L4_SCTP,
1627 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1628 RTE_PTYPE_TUNNEL_IP |
1629 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1630 RTE_PTYPE_INNER_L4_ICMP,
1633 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1634 RTE_PTYPE_TUNNEL_IP |
1635 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1636 RTE_PTYPE_INNER_L4_FRAG,
1637 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1638 RTE_PTYPE_TUNNEL_IP |
1639 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1640 RTE_PTYPE_INNER_L4_NONFRAG,
1641 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1642 RTE_PTYPE_TUNNEL_IP |
1643 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1644 RTE_PTYPE_INNER_L4_UDP,
1646 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1647 RTE_PTYPE_TUNNEL_IP |
1648 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1649 RTE_PTYPE_INNER_L4_TCP,
1650 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1651 RTE_PTYPE_TUNNEL_IP |
1652 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1653 RTE_PTYPE_INNER_L4_SCTP,
1654 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1655 RTE_PTYPE_TUNNEL_IP |
1656 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1657 RTE_PTYPE_INNER_L4_ICMP,
1659 /* IPv4 --> GRE/Teredo/VXLAN */
1660 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1661 RTE_PTYPE_TUNNEL_GRENAT,
1663 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
1664 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1665 RTE_PTYPE_TUNNEL_GRENAT |
1666 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1667 RTE_PTYPE_INNER_L4_FRAG,
1668 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1669 RTE_PTYPE_TUNNEL_GRENAT |
1670 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1671 RTE_PTYPE_INNER_L4_NONFRAG,
1672 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1673 RTE_PTYPE_TUNNEL_GRENAT |
1674 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1675 RTE_PTYPE_INNER_L4_UDP,
1677 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1678 RTE_PTYPE_TUNNEL_GRENAT |
1679 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1680 RTE_PTYPE_INNER_L4_TCP,
1681 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1682 RTE_PTYPE_TUNNEL_GRENAT |
1683 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1684 RTE_PTYPE_INNER_L4_SCTP,
1685 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1686 RTE_PTYPE_TUNNEL_GRENAT |
1687 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1688 RTE_PTYPE_INNER_L4_ICMP,
1690 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
1691 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1692 RTE_PTYPE_TUNNEL_GRENAT |
1693 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1694 RTE_PTYPE_INNER_L4_FRAG,
1695 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1696 RTE_PTYPE_TUNNEL_GRENAT |
1697 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1698 RTE_PTYPE_INNER_L4_NONFRAG,
1699 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1700 RTE_PTYPE_TUNNEL_GRENAT |
1701 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1702 RTE_PTYPE_INNER_L4_UDP,
1704 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1705 RTE_PTYPE_TUNNEL_GRENAT |
1706 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1707 RTE_PTYPE_INNER_L4_TCP,
1708 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1709 RTE_PTYPE_TUNNEL_GRENAT |
1710 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1711 RTE_PTYPE_INNER_L4_SCTP,
1712 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1713 RTE_PTYPE_TUNNEL_GRENAT |
1714 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1715 RTE_PTYPE_INNER_L4_ICMP,
1717 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
1718 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1719 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
1721 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
1722 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1723 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1724 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1725 RTE_PTYPE_INNER_L4_FRAG,
1726 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1727 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1728 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1729 RTE_PTYPE_INNER_L4_NONFRAG,
1730 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1731 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1732 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1733 RTE_PTYPE_INNER_L4_UDP,
1735 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1736 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1737 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1738 RTE_PTYPE_INNER_L4_TCP,
1739 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1740 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1741 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1742 RTE_PTYPE_INNER_L4_SCTP,
1743 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1744 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1745 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1746 RTE_PTYPE_INNER_L4_ICMP,
1748 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
1749 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1750 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1751 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1752 RTE_PTYPE_INNER_L4_FRAG,
1753 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1754 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1755 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1756 RTE_PTYPE_INNER_L4_NONFRAG,
1757 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1758 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1759 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1760 RTE_PTYPE_INNER_L4_UDP,
1762 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1763 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1764 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1765 RTE_PTYPE_INNER_L4_TCP,
1766 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1767 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1768 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1769 RTE_PTYPE_INNER_L4_SCTP,
1770 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1771 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1772 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1773 RTE_PTYPE_INNER_L4_ICMP,
1775 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
1776 [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1777 RTE_PTYPE_TUNNEL_GRENAT |
1778 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1780 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
1781 [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1782 RTE_PTYPE_TUNNEL_GRENAT |
1783 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1784 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1785 RTE_PTYPE_INNER_L4_FRAG,
1786 [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1787 RTE_PTYPE_TUNNEL_GRENAT |
1788 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1789 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1790 RTE_PTYPE_INNER_L4_NONFRAG,
1791 [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1792 RTE_PTYPE_TUNNEL_GRENAT |
1793 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1794 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1795 RTE_PTYPE_INNER_L4_UDP,
1797 [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1798 RTE_PTYPE_TUNNEL_GRENAT |
1799 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1800 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1801 RTE_PTYPE_INNER_L4_TCP,
1802 [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1803 RTE_PTYPE_TUNNEL_GRENAT |
1804 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1805 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1806 RTE_PTYPE_INNER_L4_SCTP,
1807 [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1808 RTE_PTYPE_TUNNEL_GRENAT |
1809 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1810 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1811 RTE_PTYPE_INNER_L4_ICMP,
1813 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
1814 [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1815 RTE_PTYPE_TUNNEL_GRENAT |
1816 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1817 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1818 RTE_PTYPE_INNER_L4_FRAG,
1819 [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1820 RTE_PTYPE_TUNNEL_GRENAT |
1821 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1822 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1823 RTE_PTYPE_INNER_L4_NONFRAG,
1824 [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1825 RTE_PTYPE_TUNNEL_GRENAT |
1826 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1827 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1828 RTE_PTYPE_INNER_L4_UDP,
1830 [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1831 RTE_PTYPE_TUNNEL_GRENAT |
1832 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1833 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1834 RTE_PTYPE_INNER_L4_TCP,
1835 [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1836 RTE_PTYPE_TUNNEL_GRENAT |
1837 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1838 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1839 RTE_PTYPE_INNER_L4_SCTP,
1840 [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1841 RTE_PTYPE_TUNNEL_GRENAT |
1842 RTE_PTYPE_INNER_L2_ETHER_VLAN |
1843 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1844 RTE_PTYPE_INNER_L4_ICMP,
1846 /* Non tunneled IPv6 */
1847 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1849 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1850 RTE_PTYPE_L4_NONFRAG,
1851 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1854 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1856 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1858 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1862 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1863 RTE_PTYPE_TUNNEL_IP |
1864 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1865 RTE_PTYPE_INNER_L4_FRAG,
1866 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1867 RTE_PTYPE_TUNNEL_IP |
1868 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1869 RTE_PTYPE_INNER_L4_NONFRAG,
1870 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1871 RTE_PTYPE_TUNNEL_IP |
1872 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1873 RTE_PTYPE_INNER_L4_UDP,
1875 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1876 RTE_PTYPE_TUNNEL_IP |
1877 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1878 RTE_PTYPE_INNER_L4_TCP,
1879 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1880 RTE_PTYPE_TUNNEL_IP |
1881 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1882 RTE_PTYPE_INNER_L4_SCTP,
1883 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1884 RTE_PTYPE_TUNNEL_IP |
1885 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1886 RTE_PTYPE_INNER_L4_ICMP,
1889 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1890 RTE_PTYPE_TUNNEL_IP |
1891 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1892 RTE_PTYPE_INNER_L4_FRAG,
1893 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1894 RTE_PTYPE_TUNNEL_IP |
1895 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1896 RTE_PTYPE_INNER_L4_NONFRAG,
1897 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1898 RTE_PTYPE_TUNNEL_IP |
1899 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1900 RTE_PTYPE_INNER_L4_UDP,
1901 /* [105] reserved */
1902 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1903 RTE_PTYPE_TUNNEL_IP |
1904 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1905 RTE_PTYPE_INNER_L4_TCP,
1906 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1907 RTE_PTYPE_TUNNEL_IP |
1908 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1909 RTE_PTYPE_INNER_L4_SCTP,
1910 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1911 RTE_PTYPE_TUNNEL_IP |
1912 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1913 RTE_PTYPE_INNER_L4_ICMP,
1915 /* IPv6 --> GRE/Teredo/VXLAN */
1916 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1917 RTE_PTYPE_TUNNEL_GRENAT,
1919 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
1920 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1921 RTE_PTYPE_TUNNEL_GRENAT |
1922 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1923 RTE_PTYPE_INNER_L4_FRAG,
1924 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1925 RTE_PTYPE_TUNNEL_GRENAT |
1926 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1927 RTE_PTYPE_INNER_L4_NONFRAG,
1928 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1929 RTE_PTYPE_TUNNEL_GRENAT |
1930 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1931 RTE_PTYPE_INNER_L4_UDP,
1932 /* [113] reserved */
1933 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1934 RTE_PTYPE_TUNNEL_GRENAT |
1935 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1936 RTE_PTYPE_INNER_L4_TCP,
1937 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1938 RTE_PTYPE_TUNNEL_GRENAT |
1939 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1940 RTE_PTYPE_INNER_L4_SCTP,
1941 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1942 RTE_PTYPE_TUNNEL_GRENAT |
1943 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1944 RTE_PTYPE_INNER_L4_ICMP,
1946 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
1947 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1948 RTE_PTYPE_TUNNEL_GRENAT |
1949 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1950 RTE_PTYPE_INNER_L4_FRAG,
1951 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1952 RTE_PTYPE_TUNNEL_GRENAT |
1953 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1954 RTE_PTYPE_INNER_L4_NONFRAG,
1955 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1956 RTE_PTYPE_TUNNEL_GRENAT |
1957 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1958 RTE_PTYPE_INNER_L4_UDP,
1959 /* [120] reserved */
1960 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1961 RTE_PTYPE_TUNNEL_GRENAT |
1962 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1963 RTE_PTYPE_INNER_L4_TCP,
1964 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1965 RTE_PTYPE_TUNNEL_GRENAT |
1966 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1967 RTE_PTYPE_INNER_L4_SCTP,
1968 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1969 RTE_PTYPE_TUNNEL_GRENAT |
1970 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
1971 RTE_PTYPE_INNER_L4_ICMP,
1973 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
1974 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1975 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
1977 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
1978 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1979 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1980 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1981 RTE_PTYPE_INNER_L4_FRAG,
1982 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1983 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1984 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1985 RTE_PTYPE_INNER_L4_NONFRAG,
1986 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1987 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1988 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1989 RTE_PTYPE_INNER_L4_UDP,
1990 /* [128] reserved */
1991 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1992 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1993 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1994 RTE_PTYPE_INNER_L4_TCP,
1995 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
1996 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
1997 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
1998 RTE_PTYPE_INNER_L4_SCTP,
1999 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2000 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2001 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2002 RTE_PTYPE_INNER_L4_ICMP,
2004 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2005 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2006 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2007 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2008 RTE_PTYPE_INNER_L4_FRAG,
2009 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2010 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2011 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2012 RTE_PTYPE_INNER_L4_NONFRAG,
2013 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2014 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2015 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2016 RTE_PTYPE_INNER_L4_UDP,
2017 /* [135] reserved */
2018 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2019 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2020 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2021 RTE_PTYPE_INNER_L4_TCP,
2022 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2023 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2024 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2025 RTE_PTYPE_INNER_L4_SCTP,
2026 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2027 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2028 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2029 RTE_PTYPE_INNER_L4_ICMP,
2031 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2032 [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2033 RTE_PTYPE_TUNNEL_GRENAT |
2034 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2036 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2037 [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2038 RTE_PTYPE_TUNNEL_GRENAT |
2039 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2040 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2041 RTE_PTYPE_INNER_L4_FRAG,
2042 [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2043 RTE_PTYPE_TUNNEL_GRENAT |
2044 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2045 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2046 RTE_PTYPE_INNER_L4_NONFRAG,
2047 [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2048 RTE_PTYPE_TUNNEL_GRENAT |
2049 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2050 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2051 RTE_PTYPE_INNER_L4_UDP,
2052 /* [143] reserved */
2053 [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2054 RTE_PTYPE_TUNNEL_GRENAT |
2055 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2056 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2057 RTE_PTYPE_INNER_L4_TCP,
2058 [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2059 RTE_PTYPE_TUNNEL_GRENAT |
2060 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2061 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2062 RTE_PTYPE_INNER_L4_SCTP,
2063 [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2064 RTE_PTYPE_TUNNEL_GRENAT |
2065 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2066 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2067 RTE_PTYPE_INNER_L4_ICMP,
2069 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2070 [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2071 RTE_PTYPE_TUNNEL_GRENAT |
2072 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2073 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2074 RTE_PTYPE_INNER_L4_FRAG,
2075 [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2076 RTE_PTYPE_TUNNEL_GRENAT |
2077 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2078 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2079 RTE_PTYPE_INNER_L4_NONFRAG,
2080 [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2081 RTE_PTYPE_TUNNEL_GRENAT |
2082 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2083 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2084 RTE_PTYPE_INNER_L4_UDP,
2085 /* [150] reserved */
2086 [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2087 RTE_PTYPE_TUNNEL_GRENAT |
2088 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2089 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2090 RTE_PTYPE_INNER_L4_TCP,
2091 [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2092 RTE_PTYPE_TUNNEL_GRENAT |
2093 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2094 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2095 RTE_PTYPE_INNER_L4_SCTP,
2096 [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2097 RTE_PTYPE_TUNNEL_GRENAT |
2098 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2099 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2100 RTE_PTYPE_INNER_L4_ICMP,
2101 /* [154] - [255] reserved */
2102 [256] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2103 RTE_PTYPE_TUNNEL_GTPC,
2104 [257] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2105 RTE_PTYPE_TUNNEL_GTPC,
2106 [258] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2107 RTE_PTYPE_TUNNEL_GTPU,
2108 [259] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2109 RTE_PTYPE_TUNNEL_GTPU,
2110 /* [260] - [263] reserved */
2111 [264] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2112 RTE_PTYPE_TUNNEL_GTPC,
2113 [265] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2114 RTE_PTYPE_TUNNEL_GTPC,
2115 [266] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2116 RTE_PTYPE_TUNNEL_GTPU,
2117 [267] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2118 RTE_PTYPE_TUNNEL_GTPU,
2120 /* All others reserved */
2123 return type_table[ptype];
2126 void __attribute__((cold))
2127 ice_set_default_ptype_table(struct rte_eth_dev *dev)
2129 struct ice_adapter *ad =
2130 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2133 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
2134 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);