1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
10 #define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
12 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
16 PKT_TX_OUTER_IP_CKSUM)
18 #define ICE_RX_ERR_BITS 0x3f
20 static enum ice_status
21 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
23 struct ice_vsi *vsi = rxq->vsi;
24 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
25 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
26 struct ice_rlan_ctx rx_ctx;
28 uint16_t buf_size, len;
29 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
33 * The kernel driver uses flex descriptors and programs the register
34 * for flex descriptor mode.
35 * DPDK uses legacy descriptors, so set the register back to its
36 * default value before using legacy descriptor mode.
38 regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
39 QRXFLXP_CNTXT_RXDID_PRIO_M;
40 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
42 /* Set buffer size since header split is disabled. */
43 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
44 RTE_PKTMBUF_HEADROOM);
46 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
47 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
48 rxq->max_pkt_len = RTE_MIN(len,
49 dev->data->dev_conf.rxmode.max_rx_pkt_len);
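/* Illustration with assumed values (not taken from this file): a mempool
 * whose data room is 2048 + RTE_PKTMBUF_HEADROOM gives buf_size = 2048;
 * assuming 2048 is a multiple of (1 << ICE_RLAN_CTX_DBUF_S), rx_buf_len
 * stays 2048, and if ICE_SUPPORT_CHAIN_NUM were 5, len would be 10240,
 * with max_pkt_len the smaller of that and the configured max_rx_pkt_len.
 */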
51 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
52 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
53 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
54 PMD_DRV_LOG(ERR, "maximum packet length must "
55 "be larger than %u and smaller than %u,"
56 "as jumbo frame is enabled",
57 (uint32_t)ETHER_MAX_LEN,
58 (uint32_t)ICE_FRAME_SIZE_MAX);
62 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
63 rxq->max_pkt_len > ETHER_MAX_LEN) {
64 PMD_DRV_LOG(ERR, "maximum packet length must be "
65 "larger than %u and smaller than %u, "
66 "as jumbo frame is disabled",
67 (uint32_t)ETHER_MIN_LEN,
68 (uint32_t)ETHER_MAX_LEN);
73 memset(&rx_ctx, 0, sizeof(rx_ctx));
75 rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
76 rx_ctx.qlen = rxq->nb_rx_desc;
77 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
78 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
79 rx_ctx.dtype = 0; /* No Header Split mode */
80 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
81 rx_ctx.dsize = 1; /* 32B descriptors */
83 rx_ctx.rxmax = rxq->max_pkt_len;
84 /* TPH: Transaction Layer Packet (TLP) processing hints */
85 rx_ctx.tphrdesc_ena = 1;
86 rx_ctx.tphwdesc_ena = 1;
87 rx_ctx.tphdata_ena = 1;
88 rx_ctx.tphhead_ena = 1;
89 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
90 * When the number of free descriptors goes below the lrxqthresh,
91 * an immediate interrupt is triggered.
93 rx_ctx.lrxqthresh = 2;
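/* With lrxqthresh expressed in units of 64 descriptors, a value of 2 arms
 * the low-threshold interrupt once fewer than 128 descriptors remain free.
 */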
94 /* Default: use 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
98 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
100 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
104 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
106 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
111 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
112 RTE_PKTMBUF_HEADROOM);
114 /* Check if scattered RX needs to be used. */
115 if ((rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)
116 dev->data->scattered_rx = 1;
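/* If the largest expected packet plus two VLAN tags cannot fit in a single
 * mbuf data buffer, the scattered Rx path is selected so that one packet
 * may span several chained mbufs.
 */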
118 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
120 /* Init the Rx tail register */
121 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
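/* Writing nb_rx_desc - 1 hands all but one descriptor to hardware; keeping
 * the tail one slot behind the head is the usual way such rings tell a
 * "full" ring apart from an empty one (general convention, not spelled out
 * here).
 */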
126 /* Allocate mbufs for all descriptors in rx queue */
128 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
130 struct ice_rx_entry *rxe = rxq->sw_ring;
134 for (i = 0; i < rxq->nb_rx_desc; i++) {
135 volatile union ice_rx_desc *rxd;
136 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
138 if (unlikely(!mbuf)) {
139 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
143 rte_mbuf_refcnt_set(mbuf, 1);
145 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
147 mbuf->port = rxq->port_id;
150 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
152 rxd = &rxq->rx_ring[i];
153 rxd->read.pkt_addr = dma_addr;
154 rxd->read.hdr_addr = 0;
155 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
165 /* Free all mbufs for descriptors in rx queue */
167 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
171 if (!rxq || !rxq->sw_ring) {
172 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
176 for (i = 0; i < rxq->nb_rx_desc; i++) {
177 if (rxq->sw_ring[i].mbuf) {
178 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
179 rxq->sw_ring[i].mbuf = NULL;
182 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
183 if (rxq->rx_nb_avail == 0)
185 for (i = 0; i < rxq->rx_nb_avail; i++) {
186 struct rte_mbuf *mbuf;
188 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
189 rte_pktmbuf_free_seg(mbuf);
191 rxq->rx_nb_avail = 0;
192 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
195 /* Turn an Rx queue on or off
196 * @q_idx: queue index in PF scope
197 * @on: true to turn the queue on, false to turn it off
200 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
205 /* QRX_CTRL = QRX_ENA */
206 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
209 if (reg & QRX_CTRL_QENA_STAT_M)
210 return 0; /* Already on, skip */
211 reg |= QRX_CTRL_QENA_REQ_M;
213 if (!(reg & QRX_CTRL_QENA_STAT_M))
214 return 0; /* Already off, skip */
215 reg &= ~QRX_CTRL_QENA_REQ_M;
218 /* Write the register */
219 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
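/* The polling loop below reads QRX_CTRL up to ICE_CHK_Q_ENA_COUNT times
 * with a delay of ICE_CHK_Q_ENA_INTERVAL_US between reads, so the
 * worst-case wait is roughly the product of the two constants.
 */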
220 /* Check the result. QENA_STAT is expected to follow
221 * QENA_REQ within no more than 10 us.
222 * TODO: revisit the wait counter later
224 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
225 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
226 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
228 if ((reg & QRX_CTRL_QENA_REQ_M) &&
229 (reg & QRX_CTRL_QENA_STAT_M))
232 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
233 !(reg & QRX_CTRL_QENA_STAT_M))
238 /* Check if it is timeout */
239 if (j >= ICE_CHK_Q_ENA_COUNT) {
240 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
241 (on ? "enable" : "disable"), q_idx);
249 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
250 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
252 ice_check_rx_burst_bulk_alloc_preconditions
253 (__rte_unused struct ice_rx_queue *rxq)
258 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
259 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
260 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
261 "rxq->rx_free_thresh=%d, "
262 "ICE_RX_MAX_BURST=%d",
263 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
265 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
266 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
267 "rxq->rx_free_thresh=%d, "
268 "rxq->nb_rx_desc=%d",
269 rxq->rx_free_thresh, rxq->nb_rx_desc);
271 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
272 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
273 "rxq->nb_rx_desc=%d, "
274 "rxq->rx_free_thresh=%d",
275 rxq->nb_rx_desc, rxq->rx_free_thresh);
285 /* reset fields in ice_rx_queue back to default */
287 ice_reset_rx_queue(struct ice_rx_queue *rxq)
293 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
297 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
298 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
299 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
301 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
302 len = rxq->nb_rx_desc;
304 for (i = 0; i < len * sizeof(union ice_rx_desc); i++)
305 ((volatile char *)rxq->rx_ring)[i] = 0;
307 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
308 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
309 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
310 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
312 rxq->rx_nb_avail = 0;
313 rxq->rx_next_avail = 0;
314 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
315 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
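/* rx_free_trigger starts at rx_free_thresh - 1: the bulk-alloc Rx path
 * only refills the ring once rx_tail has advanced past this index, i.e.
 * after a full rx_free_thresh worth of descriptors has been consumed.
 */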
319 rxq->pkt_first_seg = NULL;
320 rxq->pkt_last_seg = NULL;
324 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
326 struct ice_rx_queue *rxq;
328 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
330 PMD_INIT_FUNC_TRACE();
332 if (rx_queue_id >= dev->data->nb_rx_queues) {
333 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
334 rx_queue_id, dev->data->nb_rx_queues);
338 rxq = dev->data->rx_queues[rx_queue_id];
339 if (!rxq || !rxq->q_set) {
340 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
345 err = ice_program_hw_rx_queue(rxq);
347 PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
352 err = ice_alloc_rx_queue_mbufs(rxq);
354 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
360 /* Init the RX tail register. */
361 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
363 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
365 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
368 ice_rx_queue_release_mbufs(rxq);
369 ice_reset_rx_queue(rxq);
373 dev->data->rx_queue_state[rx_queue_id] =
374 RTE_ETH_QUEUE_STATE_STARTED;
380 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
382 struct ice_rx_queue *rxq;
384 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
386 if (rx_queue_id < dev->data->nb_rx_queues) {
387 rxq = dev->data->rx_queues[rx_queue_id];
389 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
391 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
395 ice_rx_queue_release_mbufs(rxq);
396 ice_reset_rx_queue(rxq);
397 dev->data->rx_queue_state[rx_queue_id] =
398 RTE_ETH_QUEUE_STATE_STOPPED;
405 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
407 struct ice_tx_queue *txq;
411 struct ice_aqc_add_tx_qgrp txq_elem;
412 struct ice_tlan_ctx tx_ctx;
414 PMD_INIT_FUNC_TRACE();
416 if (tx_queue_id >= dev->data->nb_tx_queues) {
417 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
418 tx_queue_id, dev->data->nb_tx_queues);
422 txq = dev->data->tx_queues[tx_queue_id];
423 if (!txq || !txq->q_set) {
424 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
430 hw = ICE_VSI_TO_HW(vsi);
432 memset(&txq_elem, 0, sizeof(txq_elem));
433 memset(&tx_ctx, 0, sizeof(tx_ctx));
434 txq_elem.num_txqs = 1;
435 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
437 tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
438 tx_ctx.qlen = txq->nb_tx_desc;
439 tx_ctx.pf_num = hw->pf_id;
440 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
441 tx_ctx.src_vsi = vsi->vsi_id;
442 tx_ctx.port_num = hw->port_info->lport;
443 tx_ctx.tso_ena = 1; /* tso enable */
444 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
445 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
447 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
450 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
452 /* Init the Tx tail register */
453 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
455 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
456 sizeof(txq_elem), NULL);
458 PMD_DRV_LOG(ERR, "Failed to add lan txq");
461 /* store the schedule node id */
462 txq->q_teid = txq_elem.txqs[0].q_teid;
464 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
468 /* Free all mbufs for descriptors in tx queue */
470 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
474 if (!txq || !txq->sw_ring) {
475 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
479 for (i = 0; i < txq->nb_tx_desc; i++) {
480 if (txq->sw_ring[i].mbuf) {
481 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
482 txq->sw_ring[i].mbuf = NULL;
488 ice_reset_tx_queue(struct ice_tx_queue *txq)
490 struct ice_tx_entry *txe;
491 uint16_t i, prev, size;
494 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
499 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
500 for (i = 0; i < size; i++)
501 ((volatile char *)txq->tx_ring)[i] = 0;
503 prev = (uint16_t)(txq->nb_tx_desc - 1);
504 for (i = 0; i < txq->nb_tx_desc; i++) {
505 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
507 txd->cmd_type_offset_bsz =
508 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
511 txe[prev].next_id = i;
515 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
516 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
521 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
522 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
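/* One descriptor is held back as a sentinel (see the threshold comments in
 * ice_tx_queue_setup()), so at most nb_tx_desc - 1 descriptors are ever
 * reported as free.
 */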
526 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
528 struct ice_tx_queue *txq;
529 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
530 enum ice_status status;
534 if (tx_queue_id >= dev->data->nb_tx_queues) {
535 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
536 tx_queue_id, dev->data->nb_tx_queues);
540 txq = dev->data->tx_queues[tx_queue_id];
542 PMD_DRV_LOG(ERR, "TX queue %u is not available",
547 q_ids[0] = txq->reg_idx;
548 q_teids[0] = txq->q_teid;
550 status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
551 ICE_NO_RESET, 0, NULL);
552 if (status != ICE_SUCCESS) {
553 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
557 ice_tx_queue_release_mbufs(txq);
558 ice_reset_tx_queue(txq);
559 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
565 ice_rx_queue_setup(struct rte_eth_dev *dev,
568 unsigned int socket_id,
569 const struct rte_eth_rxconf *rx_conf,
570 struct rte_mempool *mp)
572 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
573 struct ice_adapter *ad =
574 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
575 struct ice_vsi *vsi = pf->main_vsi;
576 struct ice_rx_queue *rxq;
577 const struct rte_memzone *rz;
580 int use_def_burst_func = 1;
582 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
583 nb_desc > ICE_MAX_RING_DESC ||
584 nb_desc < ICE_MIN_RING_DESC) {
585 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
590 /* Free memory if needed */
591 if (dev->data->rx_queues[queue_idx]) {
592 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
593 dev->data->rx_queues[queue_idx] = NULL;
596 /* Allocate the rx queue data structure */
597 rxq = rte_zmalloc_socket(NULL,
598 sizeof(struct ice_rx_queue),
602 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
603 "rx queue data structure");
607 rxq->nb_rx_desc = nb_desc;
608 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
609 rxq->queue_id = queue_idx;
611 rxq->reg_idx = vsi->base_queue + queue_idx;
612 rxq->port_id = dev->data->port_id;
613 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
614 rxq->crc_len = ETHER_CRC_LEN;
618 rxq->drop_en = rx_conf->rx_drop_en;
620 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
622 /* Allocate the maximum number of RX ring hardware descriptors. */
623 len = ICE_MAX_RING_DESC;
625 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
627 * Allocating a little more memory because vectorized/bulk_alloc Rx
628 * functions don't check boundaries each time.
630 len += ICE_RX_MAX_BURST;
633 /* Reserve DMA-aligned memory for the RX descriptor ring. */
634 ring_size = sizeof(union ice_rx_desc) * len;
635 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
636 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
637 ring_size, ICE_RING_BASE_ALIGN,
640 ice_rx_queue_release(rxq);
641 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
645 /* Zero all the descriptors in the ring. */
646 memset(rz->addr, 0, ring_size);
648 rxq->rx_ring_phys_addr = rz->phys_addr;
649 rxq->rx_ring = (union ice_rx_desc *)rz->addr;
651 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
652 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
657 /* Allocate the software ring. */
658 rxq->sw_ring = rte_zmalloc_socket(NULL,
659 sizeof(struct ice_rx_entry) * len,
663 ice_rx_queue_release(rxq);
664 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
668 ice_reset_rx_queue(rxq);
670 dev->data->rx_queues[queue_idx] = rxq;
672 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
674 if (!use_def_burst_func) {
675 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
676 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
677 "satisfied. Rx Burst Bulk Alloc function will be "
678 "used on port=%d, queue=%d.",
679 rxq->port_id, rxq->queue_id);
680 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
682 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
683 "not satisfied, Scattered Rx is requested, "
684 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
685 "not enabled on port=%d, queue=%d.",
686 rxq->port_id, rxq->queue_id);
687 ad->rx_bulk_alloc_allowed = false;
694 ice_rx_queue_release(void *rxq)
696 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
699 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
703 ice_rx_queue_release_mbufs(q);
704 rte_free(q->sw_ring);
709 ice_tx_queue_setup(struct rte_eth_dev *dev,
712 unsigned int socket_id,
713 const struct rte_eth_txconf *tx_conf)
715 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
716 struct ice_vsi *vsi = pf->main_vsi;
717 struct ice_tx_queue *txq;
718 const struct rte_memzone *tz;
720 uint16_t tx_rs_thresh, tx_free_thresh;
723 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
725 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
726 nb_desc > ICE_MAX_RING_DESC ||
727 nb_desc < ICE_MIN_RING_DESC) {
728 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
734 * The following two parameters control the setting of the RS bit on
735 * transmit descriptors. TX descriptors will have their RS bit set
736 * after txq->tx_rs_thresh descriptors have been used. The TX
737 * descriptor ring will be cleaned after txq->tx_free_thresh
738 * descriptors are used or if the number of descriptors required to
739 * transmit a packet is greater than the number of free TX descriptors.
741 * The following constraints must be satisfied:
742 * - tx_rs_thresh must be greater than 0.
743 * - tx_rs_thresh must be less than the size of the ring minus 2.
744 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
745 * - tx_rs_thresh must be a divisor of the ring size.
746 * - tx_free_thresh must be greater than 0.
747 * - tx_free_thresh must be less than the size of the ring minus 3.
749 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
750 * race condition, hence the maximum threshold constraints. When set
751 * to zero use default values.
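*
* Illustration (numbers chosen here, not mandated by the driver): with a
* 1024-descriptor ring, tx_rs_thresh = 32 and tx_free_thresh = 64 satisfy
* every rule above: 32 > 0, 32 < 1022, 32 <= 64, 1024 % 32 == 0, 64 > 0,
* and 64 < 1021.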
753 tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
754 tx_conf->tx_rs_thresh :
755 ICE_DEFAULT_TX_RSBIT_THRESH);
756 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
757 tx_conf->tx_free_thresh :
758 ICE_DEFAULT_TX_FREE_THRESH);
759 if (tx_rs_thresh >= (nb_desc - 2)) {
760 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
761 "number of TX descriptors minus 2. "
762 "(tx_rs_thresh=%u port=%d queue=%d)",
763 (unsigned int)tx_rs_thresh,
764 (int)dev->data->port_id,
768 if (tx_free_thresh >= (nb_desc - 3)) {
769 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
771 "number of TX descriptors minus 3. "
772 "(tx_free_thresh=%u port=%d queue=%d)",
773 (unsigned int)tx_free_thresh,
774 (int)dev->data->port_id,
778 if (tx_rs_thresh > tx_free_thresh) {
779 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
780 "equal to tx_free_thresh. (tx_free_thresh=%u"
781 " tx_rs_thresh=%u port=%d queue=%d)",
782 (unsigned int)tx_free_thresh,
783 (unsigned int)tx_rs_thresh,
784 (int)dev->data->port_id,
788 if ((nb_desc % tx_rs_thresh) != 0) {
789 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
790 "number of TX descriptors. (tx_rs_thresh=%u"
791 " port=%d queue=%d)",
792 (unsigned int)tx_rs_thresh,
793 (int)dev->data->port_id,
797 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
798 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
799 "tx_rs_thresh is greater than 1. "
800 "(tx_rs_thresh=%u port=%d queue=%d)",
801 (unsigned int)tx_rs_thresh,
802 (int)dev->data->port_id,
807 /* Free memory if needed. */
808 if (dev->data->tx_queues[queue_idx]) {
809 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
810 dev->data->tx_queues[queue_idx] = NULL;
813 /* Allocate the TX queue data structure. */
814 txq = rte_zmalloc_socket(NULL,
815 sizeof(struct ice_tx_queue),
819 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
820 "tx queue structure");
824 /* Allocate TX hardware ring descriptors. */
825 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
826 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
827 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
828 ring_size, ICE_RING_BASE_ALIGN,
831 ice_tx_queue_release(txq);
832 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
836 txq->nb_tx_desc = nb_desc;
837 txq->tx_rs_thresh = tx_rs_thresh;
838 txq->tx_free_thresh = tx_free_thresh;
839 txq->pthresh = tx_conf->tx_thresh.pthresh;
840 txq->hthresh = tx_conf->tx_thresh.hthresh;
841 txq->wthresh = tx_conf->tx_thresh.wthresh;
842 txq->queue_id = queue_idx;
844 txq->reg_idx = vsi->base_queue + queue_idx;
845 txq->port_id = dev->data->port_id;
846 txq->offloads = offloads;
848 txq->tx_deferred_start = tx_conf->tx_deferred_start;
850 txq->tx_ring_phys_addr = tz->phys_addr;
851 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
853 /* Allocate software ring */
855 rte_zmalloc_socket(NULL,
856 sizeof(struct ice_tx_entry) * nb_desc,
860 ice_tx_queue_release(txq);
861 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
865 ice_reset_tx_queue(txq);
867 dev->data->tx_queues[queue_idx] = txq;
873 ice_tx_queue_release(void *txq)
875 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
878 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
882 ice_tx_queue_release_mbufs(q);
883 rte_free(q->sw_ring);
888 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
889 struct rte_eth_rxq_info *qinfo)
891 struct ice_rx_queue *rxq;
893 rxq = dev->data->rx_queues[queue_id];
896 qinfo->scattered_rx = dev->data->scattered_rx;
897 qinfo->nb_desc = rxq->nb_rx_desc;
899 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
900 qinfo->conf.rx_drop_en = rxq->drop_en;
901 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
905 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
906 struct rte_eth_txq_info *qinfo)
908 struct ice_tx_queue *txq;
910 txq = dev->data->tx_queues[queue_id];
912 qinfo->nb_desc = txq->nb_tx_desc;
914 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
915 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
916 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
918 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
919 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
920 qinfo->conf.offloads = txq->offloads;
921 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
925 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
927 #define ICE_RXQ_SCAN_INTERVAL 4
928 volatile union ice_rx_desc *rxdp;
929 struct ice_rx_queue *rxq;
932 rxq = dev->data->rx_queues[rx_queue_id];
933 rxdp = &rxq->rx_ring[rxq->rx_tail];
934 while ((desc < rxq->nb_rx_desc) &&
935 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
936 ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
937 (1 << ICE_RX_DESC_STATUS_DD_S)) {
939 * Check the DD bit of one Rx descriptor in every group of 4,
940 * to avoid polling too frequently and degrading performance.
943 desc += ICE_RXQ_SCAN_INTERVAL;
944 rxdp += ICE_RXQ_SCAN_INTERVAL;
945 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
946 rxdp = &(rxq->rx_ring[rxq->rx_tail +
947 desc - rxq->nb_rx_desc]);
953 /* Translate the rx descriptor status to pkt flags */
954 static inline uint64_t
955 ice_rxd_status_to_pkt_flags(uint64_t qword)
959 /* Check if RSS_HASH */
960 flags = (((qword >> ICE_RX_DESC_STATUS_FLTSTAT_S) &
961 ICE_RX_DESC_FLTSTAT_RSS_HASH) ==
962 ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
967 /* Rx L3/L4 checksum */
968 static inline uint64_t
969 ice_rxd_error_to_pkt_flags(uint64_t qword)
972 uint64_t error_bits = (qword >> ICE_RXD_QW1_ERROR_S);
974 if (likely((error_bits & ICE_RX_ERR_BITS) == 0)) {
975 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
979 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_IPE_S)))
980 flags |= PKT_RX_IP_CKSUM_BAD;
982 flags |= PKT_RX_IP_CKSUM_GOOD;
984 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_L4E_S)))
985 flags |= PKT_RX_L4_CKSUM_BAD;
987 flags |= PKT_RX_L4_CKSUM_GOOD;
989 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_EIPE_S)))
990 flags |= PKT_RX_EIP_CKSUM_BAD;
996 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
998 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
999 (1 << ICE_RX_DESC_STATUS_L2TAG1P_S)) {
1000 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1002 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1003 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1004 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
1009 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1010 if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
1011 (1 << ICE_RX_DESC_EXT_STATUS_L2TAG2P_S)) {
1012 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1013 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1014 mb->vlan_tci_outer = mb->vlan_tci;
1015 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
1016 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1017 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
1018 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
1020 mb->vlan_tci_outer = 0;
1023 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1024 mb->vlan_tci, mb->vlan_tci_outer);
1027 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1028 #define ICE_LOOK_AHEAD 8
1029 #if (ICE_LOOK_AHEAD != 8)
1030 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1033 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1035 volatile union ice_rx_desc *rxdp;
1036 struct ice_rx_entry *rxep;
1037 struct rte_mbuf *mb;
1041 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1042 int32_t i, j, nb_rx = 0;
1043 uint64_t pkt_flags = 0;
1044 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1046 rxdp = &rxq->rx_ring[rxq->rx_tail];
1047 rxep = &rxq->sw_ring[rxq->rx_tail];
1049 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1050 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S;
1052 /* Make sure there is at least 1 packet to receive */
1053 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1057 * Scan LOOK_AHEAD descriptors at a time to determine which
1058 * descriptors reference packets that are ready to be received.
1060 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1061 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1062 /* Read desc statuses backwards to avoid race condition */
1063 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) {
1064 qword1 = rte_le_to_cpu_64(
1065 rxdp[j].wb.qword1.status_error_len);
1066 s[j] = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1067 ICE_RXD_QW1_STATUS_S;
1072 /* Compute how many status bits were set */
1073 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1074 nb_dd += s[j] & (1 << ICE_RX_DESC_STATUS_DD_S);
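/* Summing the masked status words counts set DD bits directly; this
 * assumes the DD flag occupies bit 0 of the status field so that each
 * term contributes exactly 0 or 1.
 */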
1078 /* Translate descriptor info to mbuf parameters */
1079 for (j = 0; j < nb_dd; j++) {
1081 qword1 = rte_le_to_cpu_64(
1082 rxdp[j].wb.qword1.status_error_len);
1083 pkt_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1084 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1085 mb->data_len = pkt_len;
1086 mb->pkt_len = pkt_len;
1088 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1089 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1090 if (pkt_flags & PKT_RX_RSS_HASH)
1093 rxdp[j].wb.qword0.hi_dword.rss);
1094 mb->packet_type = ptype_tbl[(uint8_t)(
1096 ICE_RXD_QW1_PTYPE_M) >>
1097 ICE_RXD_QW1_PTYPE_S)];
1098 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1100 mb->ol_flags |= pkt_flags;
1103 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1104 rxq->rx_stage[i + j] = rxep[j].mbuf;
1106 if (nb_dd != ICE_LOOK_AHEAD)
1110 /* Clear software ring entries */
1111 for (i = 0; i < nb_rx; i++)
1112 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1114 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1115 "port_id=%u, queue_id=%u, nb_rx=%d",
1116 rxq->port_id, rxq->queue_id, nb_rx);
1121 static inline uint16_t
1122 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1123 struct rte_mbuf **rx_pkts,
1127 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1129 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1131 for (i = 0; i < nb_pkts; i++)
1132 rx_pkts[i] = stage[i];
1134 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1135 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1141 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1143 volatile union ice_rx_desc *rxdp;
1144 struct ice_rx_entry *rxep;
1145 struct rte_mbuf *mb;
1146 uint16_t alloc_idx, i;
1150 /* Allocate buffers in bulk */
1151 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1152 (rxq->rx_free_thresh - 1));
1153 rxep = &rxq->sw_ring[alloc_idx];
1154 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1155 rxq->rx_free_thresh);
1156 if (unlikely(diag != 0)) {
1157 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1161 rxdp = &rxq->rx_ring[alloc_idx];
1162 for (i = 0; i < rxq->rx_free_thresh; i++) {
1163 if (likely(i < (rxq->rx_free_thresh - 1)))
1164 /* Prefetch next mbuf */
1165 rte_prefetch0(rxep[i + 1].mbuf);
1168 rte_mbuf_refcnt_set(mb, 1);
1170 mb->data_off = RTE_PKTMBUF_HEADROOM;
1172 mb->port = rxq->port_id;
1173 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1174 rxdp[i].read.hdr_addr = 0;
1175 rxdp[i].read.pkt_addr = dma_addr;
1178 /* Update the Rx tail register */
1180 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1182 rxq->rx_free_trigger =
1183 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1184 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1185 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
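/* Example with assumed sizes: for nb_rx_desc = 512 and rx_free_thresh = 32
 * the trigger advances 31 -> 63 -> ... -> 511 and then wraps back to 31
 * for the next pass over the ring.
 */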
1190 static inline uint16_t
1191 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1193 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1195 struct rte_eth_dev *dev;
1200 if (rxq->rx_nb_avail)
1201 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1203 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1204 rxq->rx_next_avail = 0;
1205 rxq->rx_nb_avail = nb_rx;
1206 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1208 if (rxq->rx_tail > rxq->rx_free_trigger) {
1209 if (ice_rx_alloc_bufs(rxq) != 0) {
1212 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1213 dev->data->rx_mbuf_alloc_failed +=
1214 rxq->rx_free_thresh;
1215 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1216 "port_id=%u, queue_id=%u",
1217 rxq->port_id, rxq->queue_id);
1218 rxq->rx_nb_avail = 0;
1219 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1220 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1221 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1227 if (rxq->rx_tail >= rxq->nb_rx_desc)
1230 if (rxq->rx_nb_avail)
1231 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1237 ice_recv_pkts_bulk_alloc(void *rx_queue,
1238 struct rte_mbuf **rx_pkts,
1245 if (unlikely(nb_pkts == 0))
1248 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1249 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1252 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1253 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1254 nb_rx = (uint16_t)(nb_rx + count);
1255 nb_pkts = (uint16_t)(nb_pkts - count);
1264 ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
1265 struct rte_mbuf __rte_unused **rx_pkts,
1266 uint16_t __rte_unused nb_pkts)
1270 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
1273 ice_recv_scattered_pkts(void *rx_queue,
1274 struct rte_mbuf **rx_pkts,
1277 struct ice_rx_queue *rxq = rx_queue;
1278 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1279 volatile union ice_rx_desc *rxdp;
1280 union ice_rx_desc rxd;
1281 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1282 struct ice_rx_entry *rxe;
1283 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1284 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1285 struct rte_mbuf *nmb; /* new allocated mbuf */
1286 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1287 uint16_t rx_id = rxq->rx_tail;
1289 uint16_t nb_hold = 0;
1290 uint16_t rx_packet_len;
1294 uint64_t pkt_flags = 0;
1295 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1296 struct rte_eth_dev *dev;
1298 while (nb_rx < nb_pkts) {
1299 rxdp = &rx_ring[rx_id];
1300 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1301 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1302 ICE_RXD_QW1_STATUS_S;
1304 /* Check the DD bit first */
1305 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1309 nmb = rte_mbuf_raw_alloc(rxq->mp);
1310 if (unlikely(!nmb)) {
1311 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1312 dev->data->rx_mbuf_alloc_failed++;
1315 rxd = *rxdp; /* copy descriptor in ring to temp variable */
1318 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1320 if (unlikely(rx_id == rxq->nb_rx_desc))
1323 /* Prefetch next mbuf */
1324 rte_prefetch0(sw_ring[rx_id].mbuf);
1327 * When next RX descriptor is on a cache line boundary,
1328 * prefetch the next 4 RX descriptors and next 8 pointers
1331 if ((rx_id & 0x3) == 0) {
1332 rte_prefetch0(&rx_ring[rx_id]);
1333 rte_prefetch0(&sw_ring[rx_id]);
1339 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1341 /* Set data buffer address and data length of the mbuf */
1342 rxdp->read.hdr_addr = 0;
1343 rxdp->read.pkt_addr = dma_addr;
1344 rx_packet_len = (qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1345 ICE_RXD_QW1_LEN_PBUF_S;
1346 rxm->data_len = rx_packet_len;
1347 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1348 ice_rxd_to_vlan_tci(rxm, rxdp);
1349 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1350 ICE_RXD_QW1_PTYPE_M) >>
1351 ICE_RXD_QW1_PTYPE_S)];
1354 * If this is the first buffer of the received packet, set the
1355 * pointer to the first mbuf of the packet and initialize its
1356 * context. Otherwise, update the total length and the number
1357 * of segments of the current scattered packet, and update the
1358 * pointer to the last mbuf of the current packet.
1362 first_seg->nb_segs = 1;
1363 first_seg->pkt_len = rx_packet_len;
1365 first_seg->pkt_len =
1366 (uint16_t)(first_seg->pkt_len +
1368 first_seg->nb_segs++;
1369 last_seg->next = rxm;
1373 * If this is not the last buffer of the received packet,
1374 * update the pointer to the last mbuf of the current scattered
1375 * packet and continue to parse the RX ring.
1377 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_EOF_S))) {
1383 * This is the last buffer of the received packet. If the CRC
1384 * is not stripped by the hardware:
1385 * - Subtract the CRC length from the total packet length.
1386 * - If the last buffer only contains the whole CRC or a part
1387 * of it, free the mbuf associated to the last buffer. If part
1388 * of the CRC is also contained in the previous mbuf, subtract
1389 * the length of that CRC part from the data length of the
1393 if (unlikely(rxq->crc_len > 0)) {
1394 first_seg->pkt_len -= ETHER_CRC_LEN;
1395 if (rx_packet_len <= ETHER_CRC_LEN) {
1396 rte_pktmbuf_free_seg(rxm);
1397 first_seg->nb_segs--;
1398 last_seg->data_len =
1399 (uint16_t)(last_seg->data_len -
1400 (ETHER_CRC_LEN - rx_packet_len));
1401 last_seg->next = NULL;
1403 rxm->data_len = (uint16_t)(rx_packet_len -
1407 first_seg->port = rxq->port_id;
1408 first_seg->ol_flags = 0;
1410 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1411 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1412 if (pkt_flags & PKT_RX_RSS_HASH)
1413 first_seg->hash.rss =
1414 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1416 first_seg->ol_flags |= pkt_flags;
1417 /* Prefetch data of first segment, if configured to do so. */
1418 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1419 first_seg->data_off));
1420 rx_pkts[nb_rx++] = first_seg;
1424 /* Record index of the next RX descriptor to probe. */
1425 rxq->rx_tail = rx_id;
1426 rxq->pkt_first_seg = first_seg;
1427 rxq->pkt_last_seg = last_seg;
1430 * If the number of free RX descriptors is greater than the RX free
1431 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1432 * register. Update the RDT with the value of the last processed RX
1433 * descriptor minus 1, to guarantee that the RDT register is never
1434 * equal to the RDH register, which creates a "full" ring situation
1435 * from the hardware point of view.
1437 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1438 if (nb_hold > rxq->rx_free_thresh) {
1439 rx_id = (uint16_t)(rx_id == 0 ?
1440 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1441 /* write TAIL register */
1442 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1445 rxq->nb_rx_hold = nb_hold;
1447 /* return received packet in the burst */
1452 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1454 static const uint32_t ptypes[] = {
1455 /* refers to ice_get_default_pkt_type() */
1457 RTE_PTYPE_L2_ETHER_LLDP,
1458 RTE_PTYPE_L2_ETHER_ARP,
1459 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1460 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1463 RTE_PTYPE_L4_NONFRAG,
1467 RTE_PTYPE_TUNNEL_GRENAT,
1468 RTE_PTYPE_TUNNEL_IP,
1469 RTE_PTYPE_INNER_L2_ETHER,
1470 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1471 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1472 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1473 RTE_PTYPE_INNER_L4_FRAG,
1474 RTE_PTYPE_INNER_L4_ICMP,
1475 RTE_PTYPE_INNER_L4_NONFRAG,
1476 RTE_PTYPE_INNER_L4_SCTP,
1477 RTE_PTYPE_INNER_L4_TCP,
1478 RTE_PTYPE_INNER_L4_UDP,
1479 RTE_PTYPE_TUNNEL_GTPC,
1480 RTE_PTYPE_TUNNEL_GTPU,
1484 if (dev->rx_pkt_burst == ice_recv_pkts ||
1485 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1486 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1488 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1494 ice_clear_queues(struct rte_eth_dev *dev)
1498 PMD_INIT_FUNC_TRACE();
1500 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1501 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
1502 ice_reset_tx_queue(dev->data->tx_queues[i]);
1505 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1506 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
1507 ice_reset_rx_queue(dev->data->rx_queues[i]);
1512 ice_free_queues(struct rte_eth_dev *dev)
1516 PMD_INIT_FUNC_TRACE();
1518 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1519 if (!dev->data->rx_queues[i])
1521 ice_rx_queue_release(dev->data->rx_queues[i]);
1522 dev->data->rx_queues[i] = NULL;
1524 dev->data->nb_rx_queues = 0;
1526 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1527 if (!dev->data->tx_queues[i])
1529 ice_tx_queue_release(dev->data->tx_queues[i]);
1530 dev->data->tx_queues[i] = NULL;
1532 dev->data->nb_tx_queues = 0;
1536 ice_recv_pkts(void *rx_queue,
1537 struct rte_mbuf **rx_pkts,
1540 struct ice_rx_queue *rxq = rx_queue;
1541 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1542 volatile union ice_rx_desc *rxdp;
1543 union ice_rx_desc rxd;
1544 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1545 struct ice_rx_entry *rxe;
1546 struct rte_mbuf *nmb; /* new allocated mbuf */
1547 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1548 uint16_t rx_id = rxq->rx_tail;
1550 uint16_t nb_hold = 0;
1551 uint16_t rx_packet_len;
1555 uint64_t pkt_flags = 0;
1556 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1557 struct rte_eth_dev *dev;
1559 while (nb_rx < nb_pkts) {
1560 rxdp = &rx_ring[rx_id];
1561 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1562 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1563 ICE_RXD_QW1_STATUS_S;
1565 /* Check the DD bit first */
1566 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1570 nmb = rte_mbuf_raw_alloc(rxq->mp);
1571 if (unlikely(!nmb)) {
1572 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1573 dev->data->rx_mbuf_alloc_failed++;
1576 rxd = *rxdp; /* copy descriptor in ring to temp variable */
1579 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1581 if (unlikely(rx_id == rxq->nb_rx_desc))
1586 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1589 * Fill the read format of the descriptor with the physical
1590 * address of the newly allocated mbuf: nmb
1592 rxdp->read.hdr_addr = 0;
1593 rxdp->read.pkt_addr = dma_addr;
1595 /* calculate rx_packet_len of the received pkt */
1596 rx_packet_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1597 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1599 /* fill old mbuf with received descriptor: rxd */
1600 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1601 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1604 rxm->pkt_len = rx_packet_len;
1605 rxm->data_len = rx_packet_len;
1606 rxm->port = rxq->port_id;
1607 ice_rxd_to_vlan_tci(rxm, rxdp);
1608 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1609 ICE_RXD_QW1_PTYPE_M) >>
1610 ICE_RXD_QW1_PTYPE_S)];
1611 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1612 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1613 if (pkt_flags & PKT_RX_RSS_HASH)
1615 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1616 rxm->ol_flags |= pkt_flags;
1617 /* copy old mbuf to rx_pkts */
1618 rx_pkts[nb_rx++] = rxm;
1620 rxq->rx_tail = rx_id;
1622 * If the number of free RX descriptors is greater than the RX free
1623 * threshold of the queue, advance the receive tail register of the queue.
1624 * Update that register with the value of the last processed RX
1625 * descriptor minus 1.
1627 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1628 if (nb_hold > rxq->rx_free_thresh) {
1629 rx_id = (uint16_t)(rx_id == 0 ?
1630 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1631 /* write TAIL register */
1632 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1635 rxq->nb_rx_hold = nb_hold;
1637 /* return received packet in the burst */
1642 ice_txd_enable_checksum(uint64_t ol_flags,
1644 uint32_t *td_offset,
1645 union ice_tx_offload tx_offload)
1647 /* L2 length must be set. */
1648 *td_offset |= (tx_offload.l2_len >> 1) <<
1649 ICE_TX_DESC_LEN_MACLEN_S;
1651 /* Enable L3 checksum offloads */
1652 if (ol_flags & PKT_TX_IP_CKSUM) {
1653 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1654 *td_offset |= (tx_offload.l3_len >> 2) <<
1655 ICE_TX_DESC_LEN_IPLEN_S;
1656 } else if (ol_flags & PKT_TX_IPV4) {
1657 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1658 *td_offset |= (tx_offload.l3_len >> 2) <<
1659 ICE_TX_DESC_LEN_IPLEN_S;
1660 } else if (ol_flags & PKT_TX_IPV6) {
1661 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1662 *td_offset |= (tx_offload.l3_len >> 2) <<
1663 ICE_TX_DESC_LEN_IPLEN_S;
1666 if (ol_flags & PKT_TX_TCP_SEG) {
1667 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1668 *td_offset |= (tx_offload.l4_len >> 2) <<
1669 ICE_TX_DESC_LEN_L4_LEN_S;
1673 /* Enable L4 checksum offloads */
1674 switch (ol_flags & PKT_TX_L4_MASK) {
1675 case PKT_TX_TCP_CKSUM:
1676 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1677 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
1678 ICE_TX_DESC_LEN_L4_LEN_S;
1680 case PKT_TX_SCTP_CKSUM:
1681 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1682 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
1683 ICE_TX_DESC_LEN_L4_LEN_S;
1685 case PKT_TX_UDP_CKSUM:
1686 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1687 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
1688 ICE_TX_DESC_LEN_L4_LEN_S;
1696 ice_xmit_cleanup(struct ice_tx_queue *txq)
1698 struct ice_tx_entry *sw_ring = txq->sw_ring;
1699 volatile struct ice_tx_desc *txd = txq->tx_ring;
1700 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1701 uint16_t nb_tx_desc = txq->nb_tx_desc;
1702 uint16_t desc_to_clean_to;
1703 uint16_t nb_tx_to_clean;
1705 /* Determine the last descriptor needing to be cleaned */
1706 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
1707 if (desc_to_clean_to >= nb_tx_desc)
1708 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1710 /* Check to make sure the last descriptor to clean is done */
1711 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1712 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
1713 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
1714 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1715 "(port=%d queue=%d) value=0x%"PRIx64"\n",
1717 txq->port_id, txq->queue_id,
1718 txd[desc_to_clean_to].cmd_type_offset_bsz);
1719 /* Failed to clean any descriptors */
1723 /* Figure out how many descriptors will be cleaned */
1724 if (last_desc_cleaned > desc_to_clean_to)
1725 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1728 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1731 /* The last descriptor to clean is done, so that means all the
1732 * descriptors from the last descriptor that was cleaned
1733 * up to the last descriptor with the RS bit set
1734 * are done. Only reset the threshold descriptor.
1736 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1738 /* Update the txq to reflect the last descriptor that was cleaned */
1739 txq->last_desc_cleaned = desc_to_clean_to;
1740 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
1745 /* Construct the tx flags */
1746 static inline uint64_t
1747 ice_build_ctob(uint32_t td_cmd,
1752 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
1753 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
1754 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
1755 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1756 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
1759 /* Check if the context descriptor is needed for TX offloading */
1760 static inline uint16_t
1761 ice_calc_context_desc(uint64_t flags)
1763 static uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_QINQ;
1765 return (flags & mask) ? 1 : 0;
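/* A context descriptor is only consumed when TSO or QinQ insertion is
 * requested; returning 0 or 1 lets the caller add the result directly to
 * the per-packet segment count when computing descriptor usage.
 */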
1768 /* set ice TSO context descriptor */
1769 static inline uint64_t
1770 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
1772 uint64_t ctx_desc = 0;
1773 uint32_t cd_cmd, hdr_len, cd_tso_len;
1775 if (!tx_offload.l4_len) {
1776 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1781 * In the case of a non-tunneled packet, the outer_l2_len and
1782 * outer_l3_len must be 0.
1784 hdr_len = tx_offload.outer_l2_len +
1785 tx_offload.outer_l3_len +
1790 cd_cmd = ICE_TX_CTX_DESC_TSO;
1791 cd_tso_len = mbuf->pkt_len - hdr_len;
1792 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
1793 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1794 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
1800 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1802 struct ice_tx_queue *txq;
1803 volatile struct ice_tx_desc *tx_ring;
1804 volatile struct ice_tx_desc *txd;
1805 struct ice_tx_entry *sw_ring;
1806 struct ice_tx_entry *txe, *txn;
1807 struct rte_mbuf *tx_pkt;
1808 struct rte_mbuf *m_seg;
1813 uint32_t td_cmd = 0;
1814 uint32_t td_offset = 0;
1815 uint32_t td_tag = 0;
1817 uint64_t buf_dma_addr;
1819 union ice_tx_offload tx_offload = {0};
1822 sw_ring = txq->sw_ring;
1823 tx_ring = txq->tx_ring;
1824 tx_id = txq->tx_tail;
1825 txe = &sw_ring[tx_id];
1827 /* Check if the descriptor ring needs to be cleaned. */
1828 if (txq->nb_tx_free < txq->tx_free_thresh)
1829 ice_xmit_cleanup(txq);
1831 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1832 tx_pkt = *tx_pkts++;
1835 ol_flags = tx_pkt->ol_flags;
1836 tx_offload.l2_len = tx_pkt->l2_len;
1837 tx_offload.l3_len = tx_pkt->l3_len;
1838 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1839 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1840 tx_offload.l4_len = tx_pkt->l4_len;
1841 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1842 /* Calculate the number of context descriptors needed. */
1843 nb_ctx = ice_calc_context_desc(ol_flags);
1845 /* The number of descriptors that must be allocated for
1846 * a packet equals the number of segments of that packet,
1847 * plus one context descriptor if needed.
1849 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1850 tx_last = (uint16_t)(tx_id + nb_used - 1);
1853 if (tx_last >= txq->nb_tx_desc)
1854 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
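/* Worked example (packet shape chosen for illustration): a 3-segment TSO
 * packet needs 3 data descriptors plus 1 context descriptor, so nb_used is
 * 4 and tx_last is tx_id + 3, wrapped modulo the ring size.
 */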
1856 if (nb_used > txq->nb_tx_free) {
1857 if (ice_xmit_cleanup(txq) != 0) {
1862 if (unlikely(nb_used > txq->tx_rs_thresh)) {
1863 while (nb_used > txq->nb_tx_free) {
1864 if (ice_xmit_cleanup(txq) != 0) {
1873 /* Descriptor based VLAN insertion */
1874 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
1875 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
1876 td_tag = tx_pkt->vlan_tci;
1879 /* Enable checksum offloading */
1880 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
1881 ice_txd_enable_checksum(ol_flags, &td_cmd,
1882 &td_offset, tx_offload);
1886 /* Setup TX context descriptor if required */
1887 volatile struct ice_tx_ctx_desc *ctx_txd =
1888 (volatile struct ice_tx_ctx_desc *)
1890 uint16_t cd_l2tag2 = 0;
1891 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
1893 txn = &sw_ring[txe->next_id];
1894 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1896 rte_pktmbuf_free_seg(txe->mbuf);
1900 if (ol_flags & PKT_TX_TCP_SEG)
1901 cd_type_cmd_tso_mss |=
1902 ice_set_tso_ctx(tx_pkt, tx_offload);
1904 /* TX context descriptor based double VLAN insert */
1905 if (ol_flags & PKT_TX_QINQ) {
1906 cd_l2tag2 = tx_pkt->vlan_tci_outer;
1907 cd_type_cmd_tso_mss |=
1908 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
1909 ICE_TXD_CTX_QW1_CMD_S);
1911 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
1913 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1915 txe->last_id = tx_last;
1916 tx_id = txe->next_id;
1922 txd = &tx_ring[tx_id];
1923 txn = &sw_ring[txe->next_id];
1926 rte_pktmbuf_free_seg(txe->mbuf);
1929 /* Setup TX Descriptor */
1930 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1931 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
1932 txd->cmd_type_offset_bsz =
1933 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
1934 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
1935 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
1936 ((uint64_t)m_seg->data_len <<
1937 ICE_TXD_QW1_TX_BUF_SZ_S) |
1938 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
1940 txe->last_id = tx_last;
1941 tx_id = txe->next_id;
1943 m_seg = m_seg->next;
1946 /* fill the last descriptor with End of Packet (EOP) bit */
1947 td_cmd |= ICE_TX_DESC_CMD_EOP;
1948 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
1949 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
1951 /* set RS bit on the last descriptor of one packet */
1952 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
1953 PMD_TX_FREE_LOG(DEBUG,
1954 "Setting RS bit on TXD id="
1955 "%4u (port=%d queue=%d)",
1956 tx_last, txq->port_id, txq->queue_id);
1958 td_cmd |= ICE_TX_DESC_CMD_RS;
1960 /* Update txq RS bit counters */
1961 txq->nb_tx_used = 0;
1963 txd->cmd_type_offset_bsz |=
1964 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
1970 /* update Tail register */
1971 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
1972 txq->tx_tail = tx_id;
1977 static inline int __attribute__((always_inline))
1978 ice_tx_free_bufs(struct ice_tx_queue *txq)
1980 struct ice_tx_entry *txep;
1983 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
1984 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
1985 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
1988 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
1990 for (i = 0; i < txq->tx_rs_thresh; i++)
1991 rte_prefetch0((txep + i)->mbuf);
1993 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
1994 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
1995 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
1999 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2000 rte_pktmbuf_free_seg(txep->mbuf);
2005 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2006 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2007 if (txq->tx_next_dd >= txq->nb_tx_desc)
2008 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
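/* tx_next_dd always points at the last descriptor of a tx_rs_thresh-sized
 * burst, so after wrapping it returns to index tx_rs_thresh - 1, matching
 * the value programmed in ice_reset_tx_queue().
 */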
2010 return txq->tx_rs_thresh;
2013 /* Populate 4 descriptors with data from 4 mbufs */
2015 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2020 for (i = 0; i < 4; i++, txdp++, pkts++) {
2021 dma_addr = rte_mbuf_data_iova(*pkts);
2022 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2023 txdp->cmd_type_offset_bsz =
2024 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2025 (*pkts)->data_len, 0);
2029 /* Populate 1 descriptor with data from 1 mbuf */
2031 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2035 dma_addr = rte_mbuf_data_iova(*pkts);
2036 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2037 txdp->cmd_type_offset_bsz =
2038 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2039 (*pkts)->data_len, 0);
2043 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2046 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2047 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2048 const int N_PER_LOOP = 4;
2049 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2050 int mainpart, leftover;
2054 * Process most of the packets in chunks of N pkts. Any
2055 * leftover packets will get processed one at a time.
2057 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2058 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
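/* For example, nb_pkts = 7 gives mainpart = 4 (filled by tx4() in one pass
 * of the loop below) and leftover = 3 (filled one at a time by tx1()).
 */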
2059 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2060 /* Copy N mbuf pointers to the S/W ring */
2061 for (j = 0; j < N_PER_LOOP; ++j)
2062 (txep + i + j)->mbuf = *(pkts + i + j);
2063 tx4(txdp + i, pkts + i);
2066 if (unlikely(leftover > 0)) {
2067 for (i = 0; i < leftover; ++i) {
2068 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2069 tx1(txdp + mainpart + i, pkts + mainpart + i);
2074 static inline uint16_t
2075 tx_xmit_pkts(struct ice_tx_queue *txq,
2076 struct rte_mbuf **tx_pkts,
2079 volatile struct ice_tx_desc *txr = txq->tx_ring;
2083 * Begin scanning the H/W ring for done descriptors when the number
2084 * of available descriptors drops below tx_free_thresh. For each done
2085 * descriptor, free the associated buffer.
2087 if (txq->nb_tx_free < txq->tx_free_thresh)
2088 ice_tx_free_bufs(txq);
2090 /* Use available descriptors only */
2091 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2092 if (unlikely(!nb_pkts))
2095 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2096 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2097 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2098 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2099 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2100 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2102 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2106 /* Fill hardware descriptor ring with mbuf data */
2107 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2108 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2110 /* Determine if the RS bit needs to be set */
2111 if (txq->tx_tail > txq->tx_next_rs) {
2112 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2113 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2116 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2117 if (txq->tx_next_rs >= txq->nb_tx_desc)
2118 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2121 if (txq->tx_tail >= txq->nb_tx_desc)
2124 /* Update the tx tail register */
2126 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2132 ice_xmit_pkts_simple(void *tx_queue,
2133 struct rte_mbuf **tx_pkts,
2138 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2139 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2143 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2146 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2147 &tx_pkts[nb_tx], num);
2148 nb_tx = (uint16_t)(nb_tx + ret);
2149 nb_pkts = (uint16_t)(nb_pkts - ret);
2157 void __attribute__((cold))
2158 ice_set_rx_function(struct rte_eth_dev *dev)
2160 PMD_INIT_FUNC_TRACE();
2161 struct ice_adapter *ad =
2162 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2164 if (dev->data->scattered_rx) {
2165 /* Set the non-LRO scattered function */
2167 "Using a Scattered function on port %d.",
2168 dev->data->port_id);
2169 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2170 } else if (ad->rx_bulk_alloc_allowed) {
2172 "Rx Burst Bulk Alloc Preconditions are "
2173 "satisfied. Rx Burst Bulk Alloc function "
2174 "will be used on port %d.",
2175 dev->data->port_id);
2176 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2179 "Rx Burst Bulk Alloc Preconditions are not "
2180 "satisfied, Normal Rx will be used on port %d.",
2181 dev->data->port_id);
2182 dev->rx_pkt_burst = ice_recv_pkts;
2186 /*********************************************************************
2190 **********************************************************************/
2191 /* TSO MSS and frame size limits */
2192 #define ICE_MIN_TSO_MSS 64
2193 #define ICE_MAX_TSO_MSS 9728
2194 #define ICE_MAX_TSO_FRAME_SIZE 262144
2196 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2203 for (i = 0; i < nb_pkts; i++) {
2205 ol_flags = m->ol_flags;
2207 if (ol_flags & PKT_TX_TCP_SEG &&
2208 (m->tso_segsz < ICE_MIN_TSO_MSS ||
2209 m->tso_segsz > ICE_MAX_TSO_MSS ||
2210 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
2212 * An MSS outside this range is considered malicious
2214 rte_errno = EINVAL;
2218 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2219 ret = rte_validate_tx_offload(m);
2225 ret = rte_net_intel_cksum_prepare(m);
2234 void __attribute__((cold))
2235 ice_set_tx_function(struct rte_eth_dev *dev)
2237 struct ice_adapter *ad =
2238 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2240 if (ad->tx_simple_allowed) {
2241 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
2242 dev->tx_pkt_burst = ice_xmit_pkts_simple;
2243 dev->tx_pkt_prepare = NULL;
2245 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
2246 dev->tx_pkt_burst = ice_xmit_pkts;
2247 dev->tx_pkt_prepare = ice_prep_pkts;
2251 /* For the meaning of each value, refer to the hardware datasheet.
2253 * @note: fix ice_dev_supported_ptypes_get() if any change here.
2255 static inline uint32_t
2256 ice_get_default_pkt_type(uint16_t ptype)
2258 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
2259 __rte_cache_aligned = {
2262 [1] = RTE_PTYPE_L2_ETHER,
2263 /* [2] - [5] reserved */
2264 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2265 /* [7] - [10] reserved */
2266 [11] = RTE_PTYPE_L2_ETHER_ARP,
2267 /* [12] - [21] reserved */
2269 /* Non tunneled IPv4 */
2270 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2272 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2273 RTE_PTYPE_L4_NONFRAG,
2274 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2277 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2279 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2281 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2285 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2286 RTE_PTYPE_TUNNEL_IP |
2287 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2288 RTE_PTYPE_INNER_L4_FRAG,
2289 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2290 RTE_PTYPE_TUNNEL_IP |
2291 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2292 RTE_PTYPE_INNER_L4_NONFRAG,
2293 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2294 RTE_PTYPE_TUNNEL_IP |
2295 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2296 RTE_PTYPE_INNER_L4_UDP,
2298 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2299 RTE_PTYPE_TUNNEL_IP |
2300 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2301 RTE_PTYPE_INNER_L4_TCP,
2302 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2303 RTE_PTYPE_TUNNEL_IP |
2304 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2305 RTE_PTYPE_INNER_L4_SCTP,
2306 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2307 RTE_PTYPE_TUNNEL_IP |
2308 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2309 RTE_PTYPE_INNER_L4_ICMP,
2312 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2313 RTE_PTYPE_TUNNEL_IP |
2314 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2315 RTE_PTYPE_INNER_L4_FRAG,
2316 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2317 RTE_PTYPE_TUNNEL_IP |
2318 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2319 RTE_PTYPE_INNER_L4_NONFRAG,
2320 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2321 RTE_PTYPE_TUNNEL_IP |
2322 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2323 RTE_PTYPE_INNER_L4_UDP,
2325 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2326 RTE_PTYPE_TUNNEL_IP |
2327 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2328 RTE_PTYPE_INNER_L4_TCP,
2329 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2330 RTE_PTYPE_TUNNEL_IP |
2331 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2332 RTE_PTYPE_INNER_L4_SCTP,
2333 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2334 RTE_PTYPE_TUNNEL_IP |
2335 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2336 RTE_PTYPE_INNER_L4_ICMP,
2338 /* IPv4 --> GRE/Teredo/VXLAN */
2339 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2340 RTE_PTYPE_TUNNEL_GRENAT,
2342 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2343 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2344 RTE_PTYPE_TUNNEL_GRENAT |
2345 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2346 RTE_PTYPE_INNER_L4_FRAG,
2347 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2348 RTE_PTYPE_TUNNEL_GRENAT |
2349 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2350 RTE_PTYPE_INNER_L4_NONFRAG,
2351 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2352 RTE_PTYPE_TUNNEL_GRENAT |
2353 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2354 RTE_PTYPE_INNER_L4_UDP,
2356 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2357 RTE_PTYPE_TUNNEL_GRENAT |
2358 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2359 RTE_PTYPE_INNER_L4_TCP,
2360 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2361 RTE_PTYPE_TUNNEL_GRENAT |
2362 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2363 RTE_PTYPE_INNER_L4_SCTP,
2364 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2365 RTE_PTYPE_TUNNEL_GRENAT |
2366 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2367 RTE_PTYPE_INNER_L4_ICMP,
2369 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2370 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2371 RTE_PTYPE_TUNNEL_GRENAT |
2372 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2373 RTE_PTYPE_INNER_L4_FRAG,
2374 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2375 RTE_PTYPE_TUNNEL_GRENAT |
2376 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2377 RTE_PTYPE_INNER_L4_NONFRAG,
2378 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2379 RTE_PTYPE_TUNNEL_GRENAT |
2380 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2381 RTE_PTYPE_INNER_L4_UDP,
2383 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2384 RTE_PTYPE_TUNNEL_GRENAT |
2385 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2386 RTE_PTYPE_INNER_L4_TCP,
2387 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2388 RTE_PTYPE_TUNNEL_GRENAT |
2389 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2390 RTE_PTYPE_INNER_L4_SCTP,
2391 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2392 RTE_PTYPE_TUNNEL_GRENAT |
2393 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2394 RTE_PTYPE_INNER_L4_ICMP,
2396 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2397 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2398 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2400 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2401 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2402 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2403 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2404 RTE_PTYPE_INNER_L4_FRAG,
2405 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2406 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2407 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2408 RTE_PTYPE_INNER_L4_NONFRAG,
2409 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2410 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2411 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2412 RTE_PTYPE_INNER_L4_UDP,
2414 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2415 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2416 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2417 RTE_PTYPE_INNER_L4_TCP,
2418 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2419 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2420 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2421 RTE_PTYPE_INNER_L4_SCTP,
2422 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2423 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2424 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2425 RTE_PTYPE_INNER_L4_ICMP,
2427 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2428 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2429 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2430 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2431 RTE_PTYPE_INNER_L4_FRAG,
2432 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2433 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2434 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2435 RTE_PTYPE_INNER_L4_NONFRAG,
2436 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2437 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2438 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2439 RTE_PTYPE_INNER_L4_UDP,
2441 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2442 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2443 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2444 RTE_PTYPE_INNER_L4_TCP,
2445 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2446 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2447 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2448 RTE_PTYPE_INNER_L4_SCTP,
2449 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2450 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2451 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2452 RTE_PTYPE_INNER_L4_ICMP,
2454 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2455 [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2456 RTE_PTYPE_TUNNEL_GRENAT |
2457 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2459 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2460 [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2461 RTE_PTYPE_TUNNEL_GRENAT |
2462 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2463 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2464 RTE_PTYPE_INNER_L4_FRAG,
2465 [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2466 RTE_PTYPE_TUNNEL_GRENAT |
2467 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2468 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2469 RTE_PTYPE_INNER_L4_NONFRAG,
2470 [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2471 RTE_PTYPE_TUNNEL_GRENAT |
2472 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2473 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2474 RTE_PTYPE_INNER_L4_UDP,
2476 [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2477 RTE_PTYPE_TUNNEL_GRENAT |
2478 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2479 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2480 RTE_PTYPE_INNER_L4_TCP,
2481 [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2482 RTE_PTYPE_TUNNEL_GRENAT |
2483 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2484 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2485 RTE_PTYPE_INNER_L4_SCTP,
2486 [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2487 RTE_PTYPE_TUNNEL_GRENAT |
2488 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2489 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2490 RTE_PTYPE_INNER_L4_ICMP,
2492 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2493 [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2494 RTE_PTYPE_TUNNEL_GRENAT |
2495 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2496 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2497 RTE_PTYPE_INNER_L4_FRAG,
2498 [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2499 RTE_PTYPE_TUNNEL_GRENAT |
2500 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2501 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2502 RTE_PTYPE_INNER_L4_NONFRAG,
2503 [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2504 RTE_PTYPE_TUNNEL_GRENAT |
2505 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2506 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2507 RTE_PTYPE_INNER_L4_UDP,
2509 [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2510 RTE_PTYPE_TUNNEL_GRENAT |
2511 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2512 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2513 RTE_PTYPE_INNER_L4_TCP,
2514 [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2515 RTE_PTYPE_TUNNEL_GRENAT |
2516 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2517 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2518 RTE_PTYPE_INNER_L4_SCTP,
2519 [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2520 RTE_PTYPE_TUNNEL_GRENAT |
2521 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2522 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2523 RTE_PTYPE_INNER_L4_ICMP,
2525 /* Non-tunneled IPv6 */
2526 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2528 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2529 RTE_PTYPE_L4_NONFRAG,
2530 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2533 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2535 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2537 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2541 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2542 RTE_PTYPE_TUNNEL_IP |
2543 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2544 RTE_PTYPE_INNER_L4_FRAG,
2545 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2546 RTE_PTYPE_TUNNEL_IP |
2547 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2548 RTE_PTYPE_INNER_L4_NONFRAG,
2549 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2550 RTE_PTYPE_TUNNEL_IP |
2551 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2552 RTE_PTYPE_INNER_L4_UDP,
2554 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2555 RTE_PTYPE_TUNNEL_IP |
2556 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2557 RTE_PTYPE_INNER_L4_TCP,
2558 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2559 RTE_PTYPE_TUNNEL_IP |
2560 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2561 RTE_PTYPE_INNER_L4_SCTP,
2562 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2563 RTE_PTYPE_TUNNEL_IP |
2564 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2565 RTE_PTYPE_INNER_L4_ICMP,
2568 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2569 RTE_PTYPE_TUNNEL_IP |
2570 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2571 RTE_PTYPE_INNER_L4_FRAG,
2572 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2573 RTE_PTYPE_TUNNEL_IP |
2574 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2575 RTE_PTYPE_INNER_L4_NONFRAG,
2576 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2577 RTE_PTYPE_TUNNEL_IP |
2578 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2579 RTE_PTYPE_INNER_L4_UDP,
2580 /* [105] reserved */
2581 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2582 RTE_PTYPE_TUNNEL_IP |
2583 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2584 RTE_PTYPE_INNER_L4_TCP,
2585 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2586 RTE_PTYPE_TUNNEL_IP |
2587 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2588 RTE_PTYPE_INNER_L4_SCTP,
2589 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2590 RTE_PTYPE_TUNNEL_IP |
2591 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2592 RTE_PTYPE_INNER_L4_ICMP,
2594 /* IPv6 --> GRE/Teredo/VXLAN */
2595 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2596 RTE_PTYPE_TUNNEL_GRENAT,
2598 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2599 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2600 RTE_PTYPE_TUNNEL_GRENAT |
2601 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2602 RTE_PTYPE_INNER_L4_FRAG,
2603 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2604 RTE_PTYPE_TUNNEL_GRENAT |
2605 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2606 RTE_PTYPE_INNER_L4_NONFRAG,
2607 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2608 RTE_PTYPE_TUNNEL_GRENAT |
2609 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2610 RTE_PTYPE_INNER_L4_UDP,
2611 /* [113] reserved */
2612 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2613 RTE_PTYPE_TUNNEL_GRENAT |
2614 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2615 RTE_PTYPE_INNER_L4_TCP,
2616 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2617 RTE_PTYPE_TUNNEL_GRENAT |
2618 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2619 RTE_PTYPE_INNER_L4_SCTP,
2620 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2621 RTE_PTYPE_TUNNEL_GRENAT |
2622 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2623 RTE_PTYPE_INNER_L4_ICMP,
2625 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2626 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2627 RTE_PTYPE_TUNNEL_GRENAT |
2628 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2629 RTE_PTYPE_INNER_L4_FRAG,
2630 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2631 RTE_PTYPE_TUNNEL_GRENAT |
2632 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2633 RTE_PTYPE_INNER_L4_NONFRAG,
2634 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2635 RTE_PTYPE_TUNNEL_GRENAT |
2636 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2637 RTE_PTYPE_INNER_L4_UDP,
2638 /* [120] reserved */
2639 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2640 RTE_PTYPE_TUNNEL_GRENAT |
2641 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2642 RTE_PTYPE_INNER_L4_TCP,
2643 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2644 RTE_PTYPE_TUNNEL_GRENAT |
2645 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2646 RTE_PTYPE_INNER_L4_SCTP,
2647 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2648 RTE_PTYPE_TUNNEL_GRENAT |
2649 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2650 RTE_PTYPE_INNER_L4_ICMP,
2652 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2653 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2654 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2656 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2657 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2658 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2659 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2660 RTE_PTYPE_INNER_L4_FRAG,
2661 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2662 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2663 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2664 RTE_PTYPE_INNER_L4_NONFRAG,
2665 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2666 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2667 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2668 RTE_PTYPE_INNER_L4_UDP,
2669 /* [128] reserved */
2670 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2671 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2672 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2673 RTE_PTYPE_INNER_L4_TCP,
2674 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2675 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2676 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2677 RTE_PTYPE_INNER_L4_SCTP,
2678 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2679 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2680 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2681 RTE_PTYPE_INNER_L4_ICMP,
2683 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2684 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2685 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2686 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2687 RTE_PTYPE_INNER_L4_FRAG,
2688 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2689 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2690 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2691 RTE_PTYPE_INNER_L4_NONFRAG,
2692 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2693 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2694 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2695 RTE_PTYPE_INNER_L4_UDP,
2696 /* [135] reserved */
2697 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2698 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2699 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2700 RTE_PTYPE_INNER_L4_TCP,
2701 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2702 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2703 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2704 RTE_PTYPE_INNER_L4_SCTP,
2705 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2706 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2707 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2708 RTE_PTYPE_INNER_L4_ICMP,
2710 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2711 [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2712 RTE_PTYPE_TUNNEL_GRENAT |
2713 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2715 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2716 [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2717 RTE_PTYPE_TUNNEL_GRENAT |
2718 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2719 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2720 RTE_PTYPE_INNER_L4_FRAG,
2721 [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2722 RTE_PTYPE_TUNNEL_GRENAT |
2723 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2724 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2725 RTE_PTYPE_INNER_L4_NONFRAG,
2726 [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2727 RTE_PTYPE_TUNNEL_GRENAT |
2728 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2729 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2730 RTE_PTYPE_INNER_L4_UDP,
2731 /* [143] reserved */
2732 [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2733 RTE_PTYPE_TUNNEL_GRENAT |
2734 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2735 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2736 RTE_PTYPE_INNER_L4_TCP,
2737 [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2738 RTE_PTYPE_TUNNEL_GRENAT |
2739 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2740 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2741 RTE_PTYPE_INNER_L4_SCTP,
2742 [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2743 RTE_PTYPE_TUNNEL_GRENAT |
2744 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2745 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2746 RTE_PTYPE_INNER_L4_ICMP,
2748 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2749 [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2750 RTE_PTYPE_TUNNEL_GRENAT |
2751 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2752 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2753 RTE_PTYPE_INNER_L4_FRAG,
2754 [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2755 RTE_PTYPE_TUNNEL_GRENAT |
2756 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2757 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2758 RTE_PTYPE_INNER_L4_NONFRAG,
2759 [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2760 RTE_PTYPE_TUNNEL_GRENAT |
2761 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2762 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2763 RTE_PTYPE_INNER_L4_UDP,
2764 /* [150] reserved */
2765 [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2766 RTE_PTYPE_TUNNEL_GRENAT |
2767 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2768 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2769 RTE_PTYPE_INNER_L4_TCP,
2770 [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2771 RTE_PTYPE_TUNNEL_GRENAT |
2772 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2773 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2774 RTE_PTYPE_INNER_L4_SCTP,
2775 [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2776 RTE_PTYPE_TUNNEL_GRENAT |
2777 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2778 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2779 RTE_PTYPE_INNER_L4_ICMP,
2780 /* [154] - [255] reserved */
2781 [256] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2782 RTE_PTYPE_TUNNEL_GTPC,
2783 [257] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2784 RTE_PTYPE_TUNNEL_GTPC,
2785 [258] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2786 RTE_PTYPE_TUNNEL_GTPU,
2787 [259] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2788 RTE_PTYPE_TUNNEL_GTPU,
2789 /* [260] - [263] reserved */
2790 [264] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2791 RTE_PTYPE_TUNNEL_GTPC,
2792 [265] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2793 RTE_PTYPE_TUNNEL_GTPC,
2794 [266] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2795 RTE_PTYPE_TUNNEL_GTPU,
2796 [267] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2797 RTE_PTYPE_TUNNEL_GTPU,
2799 /* All others reserved */
2802 return type_table[ptype];
2805 void __attribute__((cold))
2806 ice_set_default_ptype_table(struct rte_eth_dev *dev)
2808 struct ice_adapter *ad =
2809 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2812 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
2813 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
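/*
 * Illustrative sketch (assumption: application-side code). Once the table
 * above is installed, the Rx path translates the hardware ptype index into
 * an RTE_PTYPE_* value in mbuf->packet_type, which can be decomposed with
 * the standard masks from rte_mbuf_ptype.h.
 */
static inline int
example_is_ipv4_tcp(const struct rte_mbuf *m)
{
	uint32_t ptype = m->packet_type;

	/* The table above reports outer IPv4 as IPV4_EXT_UNKNOWN. */
	return (ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
	       (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP;
}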