1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
10 #define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
12 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
13 PKT_TX_IP_CKSUM | \
14 PKT_TX_L4_MASK | \
15 PKT_TX_TCP_SEG | \
16 PKT_TX_OUTER_IP_CKSUM)
18 #define ICE_RX_ERR_BITS 0x3f
20 static enum ice_status
21 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
23 struct ice_vsi *vsi = rxq->vsi;
24 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
25 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
26 struct ice_rlan_ctx rx_ctx;
28 uint16_t buf_size, len;
29 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
33 * The kernel driver uses flex descriptors and sets the register
34 * to flex descriptor mode.
35 * DPDK uses legacy descriptors, so it must set the register back
36 * to the default value and then use legacy descriptor mode.
38 regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
39 QRXFLXP_CNTXT_RXDID_PRIO_M;
40 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
42 /* Set the buffer size, as header split is disabled. */
43 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
44 RTE_PKTMBUF_HEADROOM);
46 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
47 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
48 rxq->max_pkt_len = RTE_MIN(len,
49 dev->data->dev_conf.rxmode.max_rx_pkt_len);
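/* Worked example (illustrative; assumes ICE_RLAN_CTX_DBUF_S gives 128-byte
 * granularity and ICE_SUPPORT_CHAIN_NUM == 5): a mempool with 2176 bytes of
 * data room and 128 bytes of headroom yields buf_size = 2048, rx_buf_len
 * stays 2048 after alignment, len = 5 * 2048 = 10240, and max_pkt_len is
 * the smaller of 10240 and the configured max_rx_pkt_len.
 */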
51 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
52 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
53 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
54 PMD_DRV_LOG(ERR, "maximum packet length must "
55 "be larger than %u and smaller than %u,"
56 "as jumbo frame is enabled",
57 (uint32_t)ETHER_MAX_LEN,
58 (uint32_t)ICE_FRAME_SIZE_MAX);
62 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
63 rxq->max_pkt_len > ETHER_MAX_LEN) {
64 PMD_DRV_LOG(ERR, "maximum packet length must be "
65 "larger than %u and smaller than %u, "
66 "as jumbo frame is disabled",
67 (uint32_t)ETHER_MIN_LEN,
68 (uint32_t)ETHER_MAX_LEN);
73 memset(&rx_ctx, 0, sizeof(rx_ctx));
75 rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
76 rx_ctx.qlen = rxq->nb_rx_desc;
77 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
78 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
79 rx_ctx.dtype = 0; /* No Header Split mode */
80 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
81 rx_ctx.dsize = 1; /* 32B descriptors */
82 #endif
83 rx_ctx.rxmax = rxq->max_pkt_len;
84 /* TPH: Transaction Layer Packet (TLP) processing hints */
85 rx_ctx.tphrdesc_ena = 1;
86 rx_ctx.tphwdesc_ena = 1;
87 rx_ctx.tphdata_ena = 1;
88 rx_ctx.tphhead_ena = 1;
89 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
90 * When the number of free descriptors goes below the lrxqthresh,
91 * an immediate interrupt is triggered.
93 rx_ctx.lrxqthresh = 2;
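/* Example: with lrxqthresh = 2 and the 64-descriptor unit described above,
 * the interrupt fires once fewer than 2 * 64 = 128 free descriptors remain.
 */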
94 /* Default: use 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
97 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
99 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
101 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
105 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
107 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
112 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
113 RTE_PKTMBUF_HEADROOM);
115 /* Check if scattered RX needs to be used. */
116 if ((rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)
117 dev->data->scattered_rx = 1;
119 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
121 /* Init the Rx tail register */
122 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
127 /* Allocate mbufs for all descriptors in rx queue */
129 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
131 struct ice_rx_entry *rxe = rxq->sw_ring;
135 for (i = 0; i < rxq->nb_rx_desc; i++) {
136 volatile union ice_rx_desc *rxd;
137 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
139 if (unlikely(!mbuf)) {
140 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
144 rte_mbuf_refcnt_set(mbuf, 1);
146 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
148 mbuf->port = rxq->port_id;
151 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
153 rxd = &rxq->rx_ring[i];
154 rxd->read.pkt_addr = dma_addr;
155 rxd->read.hdr_addr = 0;
156 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
166 /* Free all mbufs for descriptors in rx queue */
168 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
172 if (!rxq || !rxq->sw_ring) {
173 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
177 for (i = 0; i < rxq->nb_rx_desc; i++) {
178 if (rxq->sw_ring[i].mbuf) {
179 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
180 rxq->sw_ring[i].mbuf = NULL;
183 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
184 if (rxq->rx_nb_avail == 0)
185 return;
186 for (i = 0; i < rxq->rx_nb_avail; i++) {
187 struct rte_mbuf *mbuf;
189 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
190 rte_pktmbuf_free_seg(mbuf);
192 rxq->rx_nb_avail = 0;
193 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
196 /* Turn an Rx queue on or off
197 * @q_idx: queue index in PF scope
198 * @on: turn the queue on or off
201 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
206 /* QRX_CTRL = QRX_ENA */
207 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
210 if (reg & QRX_CTRL_QENA_STAT_M)
211 return 0; /* Already on, skip */
212 reg |= QRX_CTRL_QENA_REQ_M;
214 if (!(reg & QRX_CTRL_QENA_STAT_M))
215 return 0; /* Already off, skip */
216 reg &= ~QRX_CTRL_QENA_REQ_M;
219 /* Write the register */
220 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
221 /* Check the result. QENA_STAT is expected to
222 * follow QENA_REQ within no more than 10 us.
223 * TODO: need to change the wait counter later
225 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
226 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
227 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
229 if ((reg & QRX_CTRL_QENA_REQ_M) &&
230 (reg & QRX_CTRL_QENA_STAT_M))
233 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
234 !(reg & QRX_CTRL_QENA_STAT_M))
239 /* Check whether the wait timed out */
240 if (j >= ICE_CHK_Q_ENA_COUNT) {
241 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
242 (on ? "enable" : "disable"), q_idx);
250 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
251 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
252 #else
253 ice_check_rx_burst_bulk_alloc_preconditions
254 (__rte_unused struct ice_rx_queue *rxq)
259 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
260 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
261 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
262 "rxq->rx_free_thresh=%d, "
263 "ICE_RX_MAX_BURST=%d",
264 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
266 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
267 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
268 "rxq->rx_free_thresh=%d, "
269 "rxq->nb_rx_desc=%d",
270 rxq->rx_free_thresh, rxq->nb_rx_desc);
272 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
273 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
274 "rxq->nb_rx_desc=%d, "
275 "rxq->rx_free_thresh=%d",
276 rxq->nb_rx_desc, rxq->rx_free_thresh);
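/* Example of a setup that passes all three checks (assuming
 * ICE_RX_MAX_BURST == 32): nb_rx_desc = 1024 with rx_free_thresh = 64,
 * since 64 >= 32, 64 < 1024 and 1024 % 64 == 0.
 */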
286 /* reset fields in ice_rx_queue back to default */
288 ice_reset_rx_queue(struct ice_rx_queue *rxq)
294 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
298 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
299 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
300 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
301 else
302 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
303 len = rxq->nb_rx_desc;
305 for (i = 0; i < len * sizeof(union ice_rx_desc); i++)
306 ((volatile char *)rxq->rx_ring)[i] = 0;
308 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
309 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
310 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
311 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
313 rxq->rx_nb_avail = 0;
314 rxq->rx_next_avail = 0;
315 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
316 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
320 rxq->pkt_first_seg = NULL;
321 rxq->pkt_last_seg = NULL;
325 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
327 struct ice_rx_queue *rxq;
329 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
331 PMD_INIT_FUNC_TRACE();
333 if (rx_queue_id >= dev->data->nb_rx_queues) {
334 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
335 rx_queue_id, dev->data->nb_rx_queues);
339 rxq = dev->data->rx_queues[rx_queue_id];
340 if (!rxq || !rxq->q_set) {
341 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
346 err = ice_program_hw_rx_queue(rxq);
348 PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
353 err = ice_alloc_rx_queue_mbufs(rxq);
355 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
361 /* Init the RX tail register. */
362 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
364 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
366 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
369 ice_rx_queue_release_mbufs(rxq);
370 ice_reset_rx_queue(rxq);
374 dev->data->rx_queue_state[rx_queue_id] =
375 RTE_ETH_QUEUE_STATE_STARTED;
381 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
383 struct ice_rx_queue *rxq;
385 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
387 if (rx_queue_id < dev->data->nb_rx_queues) {
388 rxq = dev->data->rx_queues[rx_queue_id];
390 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
392 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
396 ice_rx_queue_release_mbufs(rxq);
397 ice_reset_rx_queue(rxq);
398 dev->data->rx_queue_state[rx_queue_id] =
399 RTE_ETH_QUEUE_STATE_STOPPED;
406 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
408 struct ice_tx_queue *txq;
412 struct ice_aqc_add_tx_qgrp txq_elem;
413 struct ice_tlan_ctx tx_ctx;
415 PMD_INIT_FUNC_TRACE();
417 if (tx_queue_id >= dev->data->nb_tx_queues) {
418 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
419 tx_queue_id, dev->data->nb_tx_queues);
423 txq = dev->data->tx_queues[tx_queue_id];
424 if (!txq || !txq->q_set) {
425 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
431 hw = ICE_VSI_TO_HW(vsi);
433 memset(&txq_elem, 0, sizeof(txq_elem));
434 memset(&tx_ctx, 0, sizeof(tx_ctx));
435 txq_elem.num_txqs = 1;
436 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
438 tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
439 tx_ctx.qlen = txq->nb_tx_desc;
440 tx_ctx.pf_num = hw->pf_id;
441 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
442 tx_ctx.src_vsi = vsi->vsi_id;
443 tx_ctx.port_num = hw->port_info->lport;
444 tx_ctx.tso_ena = 1; /* tso enable */
445 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
446 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
448 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
451 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
453 /* Init the Tx tail register */
454 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
456 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
457 sizeof(txq_elem), NULL);
459 PMD_DRV_LOG(ERR, "Failed to add lan txq");
462 /* store the schedule node id */
463 txq->q_teid = txq_elem.txqs[0].q_teid;
465 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
469 /* Free all mbufs for descriptors in tx queue */
471 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
475 if (!txq || !txq->sw_ring) {
476 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
480 for (i = 0; i < txq->nb_tx_desc; i++) {
481 if (txq->sw_ring[i].mbuf) {
482 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
483 txq->sw_ring[i].mbuf = NULL;
489 ice_reset_tx_queue(struct ice_tx_queue *txq)
491 struct ice_tx_entry *txe;
492 uint16_t i, prev, size;
495 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
500 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
501 for (i = 0; i < size; i++)
502 ((volatile char *)txq->tx_ring)[i] = 0;
504 prev = (uint16_t)(txq->nb_tx_desc - 1);
505 for (i = 0; i < txq->nb_tx_desc; i++) {
506 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
508 txd->cmd_type_offset_bsz =
509 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
512 txe[prev].next_id = i;
516 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
517 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
522 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
523 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
527 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
529 struct ice_tx_queue *txq;
530 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
531 enum ice_status status;
535 if (tx_queue_id >= dev->data->nb_tx_queues) {
536 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
537 tx_queue_id, dev->data->nb_tx_queues);
541 txq = dev->data->tx_queues[tx_queue_id];
543 PMD_DRV_LOG(ERR, "TX queue %u is not available",
548 q_ids[0] = txq->reg_idx;
549 q_teids[0] = txq->q_teid;
551 status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
552 ICE_NO_RESET, 0, NULL);
553 if (status != ICE_SUCCESS) {
554 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
558 ice_tx_queue_release_mbufs(txq);
559 ice_reset_tx_queue(txq);
560 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
566 ice_rx_queue_setup(struct rte_eth_dev *dev,
569 unsigned int socket_id,
570 const struct rte_eth_rxconf *rx_conf,
571 struct rte_mempool *mp)
573 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
574 struct ice_adapter *ad =
575 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
576 struct ice_vsi *vsi = pf->main_vsi;
577 struct ice_rx_queue *rxq;
578 const struct rte_memzone *rz;
581 int use_def_burst_func = 1;
583 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
584 nb_desc > ICE_MAX_RING_DESC ||
585 nb_desc < ICE_MIN_RING_DESC) {
586 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
591 /* Free memory if needed */
592 if (dev->data->rx_queues[queue_idx]) {
593 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
594 dev->data->rx_queues[queue_idx] = NULL;
597 /* Allocate the rx queue data structure */
598 rxq = rte_zmalloc_socket(NULL,
599 sizeof(struct ice_rx_queue),
603 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
604 "rx queue data structure");
608 rxq->nb_rx_desc = nb_desc;
609 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
610 rxq->queue_id = queue_idx;
612 rxq->reg_idx = vsi->base_queue + queue_idx;
613 rxq->port_id = dev->data->port_id;
614 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
615 rxq->crc_len = ETHER_CRC_LEN;
619 rxq->drop_en = rx_conf->rx_drop_en;
621 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
623 /* Allocate the maximum number of RX ring hardware descriptors. */
624 len = ICE_MAX_RING_DESC;
626 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
628 * Allocating a little more memory because the vectorized/bulk_alloc Rx
629 * functions don't check boundaries each time.
631 len += ICE_RX_MAX_BURST;
634 /* Size the ring for the maximum number of RX hardware descriptors. */
635 ring_size = sizeof(union ice_rx_desc) * len;
636 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
637 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
638 ring_size, ICE_RING_BASE_ALIGN,
641 ice_rx_queue_release(rxq);
642 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
646 /* Zero all the descriptors in the ring. */
647 memset(rz->addr, 0, ring_size);
649 rxq->rx_ring_phys_addr = rz->phys_addr;
650 rxq->rx_ring = (union ice_rx_desc *)rz->addr;
652 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
653 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
658 /* Allocate the software ring. */
659 rxq->sw_ring = rte_zmalloc_socket(NULL,
660 sizeof(struct ice_rx_entry) * len,
664 ice_rx_queue_release(rxq);
665 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
669 ice_reset_rx_queue(rxq);
671 dev->data->rx_queues[queue_idx] = rxq;
673 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
675 if (!use_def_burst_func) {
676 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
677 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
678 "satisfied. Rx Burst Bulk Alloc function will be "
679 "used on port=%d, queue=%d.",
680 rxq->port_id, rxq->queue_id);
681 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
682 } else {
683 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
684 "not satisfied, Scattered Rx is requested, "
685 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
686 "not enabled on port=%d, queue=%d.",
687 rxq->port_id, rxq->queue_id);
688 ad->rx_bulk_alloc_allowed = false;
695 ice_rx_queue_release(void *rxq)
697 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
700 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
704 ice_rx_queue_release_mbufs(q);
705 rte_free(q->sw_ring);
710 ice_tx_queue_setup(struct rte_eth_dev *dev,
713 unsigned int socket_id,
714 const struct rte_eth_txconf *tx_conf)
716 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
717 struct ice_vsi *vsi = pf->main_vsi;
718 struct ice_tx_queue *txq;
719 const struct rte_memzone *tz;
721 uint16_t tx_rs_thresh, tx_free_thresh;
724 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
726 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
727 nb_desc > ICE_MAX_RING_DESC ||
728 nb_desc < ICE_MIN_RING_DESC) {
729 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
735 * The following two parameters control the setting of the RS bit on
736 * transmit descriptors. TX descriptors will have their RS bit set
737 * after txq->tx_rs_thresh descriptors have been used. The TX
738 * descriptor ring will be cleaned after txq->tx_free_thresh
739 * descriptors are used or if the number of descriptors required to
740 * transmit a packet is greater than the number of free TX descriptors.
742 * The following constraints must be satisfied:
743 * - tx_rs_thresh must be greater than 0.
744 * - tx_rs_thresh must be less than the size of the ring minus 2.
745 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
746 * - tx_rs_thresh must be a divisor of the ring size.
747 * - tx_free_thresh must be greater than 0.
748 * - tx_free_thresh must be less than the size of the ring minus 3.
750 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
751 * race condition, hence the maximum threshold constraints. When set
752 * to zero use default values.
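/* Worked example (illustrative only): nb_desc = 512 with tx_rs_thresh = 32
 * and tx_free_thresh = 64 satisfies every constraint above: 32 > 0,
 * 32 < 510, 32 <= 64, 512 % 32 == 0, 64 > 0 and 64 < 509.
 */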
754 tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
755 tx_conf->tx_rs_thresh :
756 ICE_DEFAULT_TX_RSBIT_THRESH);
757 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
758 tx_conf->tx_free_thresh :
759 ICE_DEFAULT_TX_FREE_THRESH);
760 if (tx_rs_thresh >= (nb_desc - 2)) {
761 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
762 "number of TX descriptors minus 2. "
763 "(tx_rs_thresh=%u port=%d queue=%d)",
764 (unsigned int)tx_rs_thresh,
765 (int)dev->data->port_id,
769 if (tx_free_thresh >= (nb_desc - 3)) {
770 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
772 "number of TX descriptors minus 3. "
773 "(tx_free_thresh=%u port=%d queue=%d)",
774 (unsigned int)tx_free_thresh,
775 (int)dev->data->port_id,
779 if (tx_rs_thresh > tx_free_thresh) {
780 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
781 "equal to tx_free_thresh. (tx_free_thresh=%u"
782 " tx_rs_thresh=%u port=%d queue=%d)",
783 (unsigned int)tx_free_thresh,
784 (unsigned int)tx_rs_thresh,
785 (int)dev->data->port_id,
789 if ((nb_desc % tx_rs_thresh) != 0) {
790 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
791 "number of TX descriptors. (tx_rs_thresh=%u"
792 " port=%d queue=%d)",
793 (unsigned int)tx_rs_thresh,
794 (int)dev->data->port_id,
798 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
799 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
800 "tx_rs_thresh is greater than 1. "
801 "(tx_rs_thresh=%u port=%d queue=%d)",
802 (unsigned int)tx_rs_thresh,
803 (int)dev->data->port_id,
808 /* Free memory if needed. */
809 if (dev->data->tx_queues[queue_idx]) {
810 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
811 dev->data->tx_queues[queue_idx] = NULL;
814 /* Allocate the TX queue data structure. */
815 txq = rte_zmalloc_socket(NULL,
816 sizeof(struct ice_tx_queue),
820 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
821 "tx queue structure");
825 /* Allocate TX hardware ring descriptors. */
826 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
827 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
828 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
829 ring_size, ICE_RING_BASE_ALIGN,
832 ice_tx_queue_release(txq);
833 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
837 txq->nb_tx_desc = nb_desc;
838 txq->tx_rs_thresh = tx_rs_thresh;
839 txq->tx_free_thresh = tx_free_thresh;
840 txq->pthresh = tx_conf->tx_thresh.pthresh;
841 txq->hthresh = tx_conf->tx_thresh.hthresh;
842 txq->wthresh = tx_conf->tx_thresh.wthresh;
843 txq->queue_id = queue_idx;
845 txq->reg_idx = vsi->base_queue + queue_idx;
846 txq->port_id = dev->data->port_id;
847 txq->offloads = offloads;
849 txq->tx_deferred_start = tx_conf->tx_deferred_start;
851 txq->tx_ring_phys_addr = tz->phys_addr;
852 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
854 /* Allocate software ring */
856 rte_zmalloc_socket(NULL,
857 sizeof(struct ice_tx_entry) * nb_desc,
861 ice_tx_queue_release(txq);
862 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
866 ice_reset_tx_queue(txq);
868 dev->data->tx_queues[queue_idx] = txq;
874 ice_tx_queue_release(void *txq)
876 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
879 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
883 ice_tx_queue_release_mbufs(q);
884 rte_free(q->sw_ring);
889 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
890 struct rte_eth_rxq_info *qinfo)
892 struct ice_rx_queue *rxq;
894 rxq = dev->data->rx_queues[queue_id];
897 qinfo->scattered_rx = dev->data->scattered_rx;
898 qinfo->nb_desc = rxq->nb_rx_desc;
900 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
901 qinfo->conf.rx_drop_en = rxq->drop_en;
902 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
906 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
907 struct rte_eth_txq_info *qinfo)
909 struct ice_tx_queue *txq;
911 txq = dev->data->tx_queues[queue_id];
913 qinfo->nb_desc = txq->nb_tx_desc;
915 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
916 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
917 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
919 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
920 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
921 qinfo->conf.offloads = txq->offloads;
922 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
926 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
928 #define ICE_RXQ_SCAN_INTERVAL 4
929 volatile union ice_rx_desc *rxdp;
930 struct ice_rx_queue *rxq;
933 rxq = dev->data->rx_queues[rx_queue_id];
934 rxdp = &rxq->rx_ring[rxq->rx_tail];
935 while ((desc < rxq->nb_rx_desc) &&
936 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
937 ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
938 (1 << ICE_RX_DESC_STATUS_DD_S)) {
940 * Check the DD bit of every fourth Rx descriptor in the group,
941 * to avoid checking too frequently and degrading performance
944 desc += ICE_RXQ_SCAN_INTERVAL;
945 rxdp += ICE_RXQ_SCAN_INTERVAL;
946 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
947 rxdp = &(rxq->rx_ring[rxq->rx_tail +
948 desc - rxq->nb_rx_desc]);
954 /* Translate the rx descriptor status to pkt flags */
955 static inline uint64_t
956 ice_rxd_status_to_pkt_flags(uint64_t qword)
960 /* Check if RSS_HASH */
961 flags = (((qword >> ICE_RX_DESC_STATUS_FLTSTAT_S) &
962 ICE_RX_DESC_FLTSTAT_RSS_HASH) ==
963 ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
968 /* Rx L3/L4 checksum */
969 static inline uint64_t
970 ice_rxd_error_to_pkt_flags(uint64_t qword)
973 uint64_t error_bits = (qword >> ICE_RXD_QW1_ERROR_S);
975 if (likely((error_bits & ICE_RX_ERR_BITS) == 0)) {
976 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
980 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_IPE_S)))
981 flags |= PKT_RX_IP_CKSUM_BAD;
983 flags |= PKT_RX_IP_CKSUM_GOOD;
985 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_L4E_S)))
986 flags |= PKT_RX_L4_CKSUM_BAD;
988 flags |= PKT_RX_L4_CKSUM_GOOD;
990 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_EIPE_S)))
991 flags |= PKT_RX_EIP_CKSUM_BAD;
997 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
999 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1000 (1 << ICE_RX_DESC_STATUS_L2TAG1P_S)) {
1001 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1003 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1004 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1005 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
1010 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1011 if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
1012 (1 << ICE_RX_DESC_EXT_STATUS_L2TAG2P_S)) {
1013 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1014 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1015 mb->vlan_tci_outer = mb->vlan_tci;
1016 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
1017 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1018 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
1019 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
1021 mb->vlan_tci_outer = 0;
1024 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1025 mb->vlan_tci, mb->vlan_tci_outer);
1028 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1029 #define ICE_LOOK_AHEAD 8
1030 #if (ICE_LOOK_AHEAD != 8)
1031 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1034 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1036 volatile union ice_rx_desc *rxdp;
1037 struct ice_rx_entry *rxep;
1038 struct rte_mbuf *mb;
1042 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1043 int32_t i, j, nb_rx = 0;
1044 uint64_t pkt_flags = 0;
1045 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1047 rxdp = &rxq->rx_ring[rxq->rx_tail];
1048 rxep = &rxq->sw_ring[rxq->rx_tail];
1050 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1051 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S;
1053 /* Make sure there is at least 1 packet to receive */
1054 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1058 * Scan LOOK_AHEAD descriptors at a time to determine which
1059 * descriptors reference packets that are ready to be received.
1061 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1062 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1063 /* Read desc statuses backwards to avoid race condition */
1064 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) {
1065 qword1 = rte_le_to_cpu_64(
1066 rxdp[j].wb.qword1.status_error_len);
1067 s[j] = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1068 ICE_RXD_QW1_STATUS_S;
1073 /* Compute how many status bits were set */
1074 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1075 nb_dd += s[j] & (1 << ICE_RX_DESC_STATUS_DD_S);
1079 /* Translate descriptor info to mbuf parameters */
1080 for (j = 0; j < nb_dd; j++) {
1082 qword1 = rte_le_to_cpu_64(
1083 rxdp[j].wb.qword1.status_error_len);
1084 pkt_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1085 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1086 mb->data_len = pkt_len;
1087 mb->pkt_len = pkt_len;
1089 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1090 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1091 if (pkt_flags & PKT_RX_RSS_HASH)
1094 rxdp[j].wb.qword0.hi_dword.rss);
1095 mb->packet_type = ptype_tbl[(uint8_t)(
1097 ICE_RXD_QW1_PTYPE_M) >>
1098 ICE_RXD_QW1_PTYPE_S)];
1099 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1101 mb->ol_flags |= pkt_flags;
1104 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1105 rxq->rx_stage[i + j] = rxep[j].mbuf;
1107 if (nb_dd != ICE_LOOK_AHEAD)
1111 /* Clear software ring entries */
1112 for (i = 0; i < nb_rx; i++)
1113 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1115 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1116 "port_id=%u, queue_id=%u, nb_rx=%d",
1117 rxq->port_id, rxq->queue_id, nb_rx);
1122 static inline uint16_t
1123 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1124 struct rte_mbuf **rx_pkts,
1128 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1130 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1132 for (i = 0; i < nb_pkts; i++)
1133 rx_pkts[i] = stage[i];
1135 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1136 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1142 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1144 volatile union ice_rx_desc *rxdp;
1145 struct ice_rx_entry *rxep;
1146 struct rte_mbuf *mb;
1147 uint16_t alloc_idx, i;
1151 /* Allocate buffers in bulk */
1152 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1153 (rxq->rx_free_thresh - 1));
1154 rxep = &rxq->sw_ring[alloc_idx];
1155 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1156 rxq->rx_free_thresh);
1157 if (unlikely(diag != 0)) {
1158 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1162 rxdp = &rxq->rx_ring[alloc_idx];
1163 for (i = 0; i < rxq->rx_free_thresh; i++) {
1164 if (likely(i < (rxq->rx_free_thresh - 1)))
1165 /* Prefetch next mbuf */
1166 rte_prefetch0(rxep[i + 1].mbuf);
1169 rte_mbuf_refcnt_set(mb, 1);
1171 mb->data_off = RTE_PKTMBUF_HEADROOM;
1173 mb->port = rxq->port_id;
1174 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1175 rxdp[i].read.hdr_addr = 0;
1176 rxdp[i].read.pkt_addr = dma_addr;
1179 /* Update the Rx tail register */
1181 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1183 rxq->rx_free_trigger =
1184 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1185 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1186 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
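/* Example: with nb_rx_desc = 512 and rx_free_thresh = 64, rx_free_trigger
 * starts at 63 and advances to 127, 191, ..., 511 before wrapping back
 * to 63.
 */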
1191 static inline uint16_t
1192 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1194 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1196 struct rte_eth_dev *dev;
1201 if (rxq->rx_nb_avail)
1202 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1204 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1205 rxq->rx_next_avail = 0;
1206 rxq->rx_nb_avail = nb_rx;
1207 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1209 if (rxq->rx_tail > rxq->rx_free_trigger) {
1210 if (ice_rx_alloc_bufs(rxq) != 0) {
1213 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1214 dev->data->rx_mbuf_alloc_failed +=
1215 rxq->rx_free_thresh;
1216 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1217 "port_id=%u, queue_id=%u",
1218 rxq->port_id, rxq->queue_id);
1219 rxq->rx_nb_avail = 0;
1220 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1221 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1222 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1228 if (rxq->rx_tail >= rxq->nb_rx_desc)
1231 if (rxq->rx_nb_avail)
1232 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1238 ice_recv_pkts_bulk_alloc(void *rx_queue,
1239 struct rte_mbuf **rx_pkts,
1246 if (unlikely(nb_pkts == 0))
1249 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1250 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1253 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1254 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1255 nb_rx = (uint16_t)(nb_rx + count);
1256 nb_pkts = (uint16_t)(nb_pkts - count);
1265 ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
1266 struct rte_mbuf __rte_unused **rx_pkts,
1267 uint16_t __rte_unused nb_pkts)
1271 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
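/* Illustrative behaviour (assuming ICE_RX_MAX_BURST == 32): a request for
 * nb_pkts = 100 is served by the bulk-alloc path in chunks of 32, 32, 32
 * and 4, stopping early if a chunk returns fewer packets than requested.
 */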
1274 ice_recv_scattered_pkts(void *rx_queue,
1275 struct rte_mbuf **rx_pkts,
1278 struct ice_rx_queue *rxq = rx_queue;
1279 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1280 volatile union ice_rx_desc *rxdp;
1281 union ice_rx_desc rxd;
1282 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1283 struct ice_rx_entry *rxe;
1284 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1285 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1286 struct rte_mbuf *nmb; /* newly allocated mbuf */
1287 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1288 uint16_t rx_id = rxq->rx_tail;
1290 uint16_t nb_hold = 0;
1291 uint16_t rx_packet_len;
1295 uint64_t pkt_flags = 0;
1296 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1297 struct rte_eth_dev *dev;
1299 while (nb_rx < nb_pkts) {
1300 rxdp = &rx_ring[rx_id];
1301 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1302 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1303 ICE_RXD_QW1_STATUS_S;
1305 /* Check the DD bit first */
1306 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1310 nmb = rte_mbuf_raw_alloc(rxq->mp);
1311 if (unlikely(!nmb)) {
1312 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1313 dev->data->rx_mbuf_alloc_failed++;
1316 rxd = *rxdp; /* copy the descriptor from the ring to a temp variable */
1319 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1321 if (unlikely(rx_id == rxq->nb_rx_desc))
1324 /* Prefetch next mbuf */
1325 rte_prefetch0(sw_ring[rx_id].mbuf);
1328 * When next RX descriptor is on a cache line boundary,
1329 * prefetch the next 4 RX descriptors and next 8 pointers
1332 if ((rx_id & 0x3) == 0) {
1333 rte_prefetch0(&rx_ring[rx_id]);
1334 rte_prefetch0(&sw_ring[rx_id]);
1340 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1342 /* Set data buffer address and data length of the mbuf */
1343 rxdp->read.hdr_addr = 0;
1344 rxdp->read.pkt_addr = dma_addr;
1345 rx_packet_len = (qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1346 ICE_RXD_QW1_LEN_PBUF_S;
1347 rxm->data_len = rx_packet_len;
1348 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1349 ice_rxd_to_vlan_tci(rxm, rxdp);
1350 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1351 ICE_RXD_QW1_PTYPE_M) >>
1352 ICE_RXD_QW1_PTYPE_S)];
1355 * If this is the first buffer of the received packet, set the
1356 * pointer to the first mbuf of the packet and initialize its
1357 * context. Otherwise, update the total length and the number
1358 * of segments of the current scattered packet, and update the
1359 * pointer to the last mbuf of the current packet.
1363 first_seg->nb_segs = 1;
1364 first_seg->pkt_len = rx_packet_len;
1366 first_seg->pkt_len =
1367 (uint16_t)(first_seg->pkt_len +
1369 first_seg->nb_segs++;
1370 last_seg->next = rxm;
1374 * If this is not the last buffer of the received packet,
1375 * update the pointer to the last mbuf of the current scattered
1376 * packet and continue to parse the RX ring.
1378 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_EOF_S))) {
1384 * This is the last buffer of the received packet. If the CRC
1385 * is not stripped by the hardware:
1386 * - Subtract the CRC length from the total packet length.
1387 * - If the last buffer only contains the whole CRC or a part
1388 * of it, free the mbuf associated to the last buffer. If part
1389 * of the CRC is also contained in the previous mbuf, subtract
1390 * the length of that CRC part from the data length of the
1394 if (unlikely(rxq->crc_len > 0)) {
1395 first_seg->pkt_len -= ETHER_CRC_LEN;
1396 if (rx_packet_len <= ETHER_CRC_LEN) {
1397 rte_pktmbuf_free_seg(rxm);
1398 first_seg->nb_segs--;
1399 last_seg->data_len =
1400 (uint16_t)(last_seg->data_len -
1401 (ETHER_CRC_LEN - rx_packet_len));
1402 last_seg->next = NULL;
1404 rxm->data_len = (uint16_t)(rx_packet_len -
1408 first_seg->port = rxq->port_id;
1409 first_seg->ol_flags = 0;
1411 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1412 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1413 if (pkt_flags & PKT_RX_RSS_HASH)
1414 first_seg->hash.rss =
1415 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1417 first_seg->ol_flags |= pkt_flags;
1418 /* Prefetch data of first segment, if configured to do so. */
1419 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1420 first_seg->data_off));
1421 rx_pkts[nb_rx++] = first_seg;
1425 /* Record index of the next RX descriptor to probe. */
1426 rxq->rx_tail = rx_id;
1427 rxq->pkt_first_seg = first_seg;
1428 rxq->pkt_last_seg = last_seg;
1431 * If the number of free RX descriptors is greater than the RX free
1432 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1433 * register. Update the RDT with the value of the last processed RX
1434 * descriptor minus 1, to guarantee that the RDT register is never
1435 equal to the RDH register, which creates a "full" ring situation
1436 * from the hardware point of view.
1438 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1439 if (nb_hold > rxq->rx_free_thresh) {
1440 rx_id = (uint16_t)(rx_id == 0 ?
1441 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1442 /* write TAIL register */
1443 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1446 rxq->nb_rx_hold = nb_hold;
1448 /* return received packet in the burst */
1453 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1455 static const uint32_t ptypes[] = {
1456 /* refers to ice_get_default_pkt_type() */
1458 RTE_PTYPE_L2_ETHER_LLDP,
1459 RTE_PTYPE_L2_ETHER_ARP,
1460 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1461 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1464 RTE_PTYPE_L4_NONFRAG,
1468 RTE_PTYPE_TUNNEL_GRENAT,
1469 RTE_PTYPE_TUNNEL_IP,
1470 RTE_PTYPE_INNER_L2_ETHER,
1471 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1472 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1473 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1474 RTE_PTYPE_INNER_L4_FRAG,
1475 RTE_PTYPE_INNER_L4_ICMP,
1476 RTE_PTYPE_INNER_L4_NONFRAG,
1477 RTE_PTYPE_INNER_L4_SCTP,
1478 RTE_PTYPE_INNER_L4_TCP,
1479 RTE_PTYPE_INNER_L4_UDP,
1480 RTE_PTYPE_TUNNEL_GTPC,
1481 RTE_PTYPE_TUNNEL_GTPU,
1485 if (dev->rx_pkt_burst == ice_recv_pkts ||
1486 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1487 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1489 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1495 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1497 struct ice_rx_queue *rxq = rx_queue;
1498 volatile uint64_t *status;
1502 if (unlikely(offset >= rxq->nb_rx_desc))
1505 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1506 return RTE_ETH_RX_DESC_UNAVAIL;
1508 desc = rxq->rx_tail + offset;
1509 if (desc >= rxq->nb_rx_desc)
1510 desc -= rxq->nb_rx_desc;
1512 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
1513 mask = rte_cpu_to_le_64((1ULL << ICE_RX_DESC_STATUS_DD_S) <<
1514 ICE_RXD_QW1_STATUS_S);
1516 return RTE_ETH_RX_DESC_DONE;
1518 return RTE_ETH_RX_DESC_AVAIL;
1522 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1524 struct ice_tx_queue *txq = tx_queue;
1525 volatile uint64_t *status;
1526 uint64_t mask, expect;
1529 if (unlikely(offset >= txq->nb_tx_desc))
1532 desc = txq->tx_tail + offset;
1533 /* go to next desc that has the RS bit */
1534 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1535 txq->tx_rs_thresh;
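/* Example: with tx_rs_thresh = 32, an offset landing on descriptor 70 is
 * rounded up to descriptor 96, the next descriptor carrying the RS bit.
 */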
1536 if (desc >= txq->nb_tx_desc) {
1537 desc -= txq->nb_tx_desc;
1538 if (desc >= txq->nb_tx_desc)
1539 desc -= txq->nb_tx_desc;
1542 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1543 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1544 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1545 ICE_TXD_QW1_DTYPE_S);
1546 if ((*status & mask) == expect)
1547 return RTE_ETH_TX_DESC_DONE;
1549 return RTE_ETH_TX_DESC_FULL;
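/* A minimal usage sketch (hypothetical helper, not part of this driver):
 * rte_eth_tx_descriptor_status() dispatches to the function above for ice
 * ports, so polling one tx_rs_thresh ahead of the tail hints whether a
 * burst of that size can still be enqueued.
 */
static inline int
ice_tx_ring_has_room(void *tx_queue, uint16_t tx_rs_thresh)
{
	return ice_tx_descriptor_status(tx_queue, tx_rs_thresh) !=
	       RTE_ETH_TX_DESC_FULL;
}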
1553 ice_clear_queues(struct rte_eth_dev *dev)
1557 PMD_INIT_FUNC_TRACE();
1559 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1560 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
1561 ice_reset_tx_queue(dev->data->tx_queues[i]);
1564 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1565 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
1566 ice_reset_rx_queue(dev->data->rx_queues[i]);
1571 ice_free_queues(struct rte_eth_dev *dev)
1575 PMD_INIT_FUNC_TRACE();
1577 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1578 if (!dev->data->rx_queues[i])
1580 ice_rx_queue_release(dev->data->rx_queues[i]);
1581 dev->data->rx_queues[i] = NULL;
1583 dev->data->nb_rx_queues = 0;
1585 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1586 if (!dev->data->tx_queues[i])
1588 ice_tx_queue_release(dev->data->tx_queues[i]);
1589 dev->data->tx_queues[i] = NULL;
1591 dev->data->nb_tx_queues = 0;
1595 ice_recv_pkts(void *rx_queue,
1596 struct rte_mbuf **rx_pkts,
1599 struct ice_rx_queue *rxq = rx_queue;
1600 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1601 volatile union ice_rx_desc *rxdp;
1602 union ice_rx_desc rxd;
1603 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1604 struct ice_rx_entry *rxe;
1605 struct rte_mbuf *nmb; /* newly allocated mbuf */
1606 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1607 uint16_t rx_id = rxq->rx_tail;
1609 uint16_t nb_hold = 0;
1610 uint16_t rx_packet_len;
1614 uint64_t pkt_flags = 0;
1615 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1616 struct rte_eth_dev *dev;
1618 while (nb_rx < nb_pkts) {
1619 rxdp = &rx_ring[rx_id];
1620 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1621 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1622 ICE_RXD_QW1_STATUS_S;
1624 /* Check the DD bit first */
1625 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1629 nmb = rte_mbuf_raw_alloc(rxq->mp);
1630 if (unlikely(!nmb)) {
1631 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1632 dev->data->rx_mbuf_alloc_failed++;
1635 rxd = *rxdp; /* copy the descriptor from the ring to a temp variable */
1638 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1640 if (unlikely(rx_id == rxq->nb_rx_desc))
1645 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1648 * fill the read format of the descriptor with the physical
1649 * address of the newly allocated mbuf: nmb
1651 rxdp->read.hdr_addr = 0;
1652 rxdp->read.pkt_addr = dma_addr;
1654 /* calculate rx_packet_len of the received pkt */
1655 rx_packet_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1656 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1658 /* fill the old mbuf with data from the received descriptor: rxd */
1659 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1660 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1663 rxm->pkt_len = rx_packet_len;
1664 rxm->data_len = rx_packet_len;
1665 rxm->port = rxq->port_id;
1666 ice_rxd_to_vlan_tci(rxm, rxdp);
1667 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1668 ICE_RXD_QW1_PTYPE_M) >>
1669 ICE_RXD_QW1_PTYPE_S)];
1670 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1671 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1672 if (pkt_flags & PKT_RX_RSS_HASH)
1674 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1675 rxm->ol_flags |= pkt_flags;
1676 /* copy old mbuf to rx_pkts */
1677 rx_pkts[nb_rx++] = rxm;
1679 rxq->rx_tail = rx_id;
1681 * If the number of free RX descriptors is greater than the RX free
1682 * threshold of the queue, advance the receive tail register of queue.
1683 * Update that register with the value of the last processed RX
1684 * descriptor minus 1.
1686 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1687 if (nb_hold > rxq->rx_free_thresh) {
1688 rx_id = (uint16_t)(rx_id == 0 ?
1689 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1690 /* write TAIL register */
1691 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1694 rxq->nb_rx_hold = nb_hold;
1696 /* return received packet in the burst */
1701 ice_txd_enable_checksum(uint64_t ol_flags,
1703 uint32_t *td_offset,
1704 union ice_tx_offload tx_offload)
1706 /* L2 length must be set. */
1707 *td_offset |= (tx_offload.l2_len >> 1) <<
1708 ICE_TX_DESC_LEN_MACLEN_S;
1710 /* Enable L3 checksum offloads */
1711 if (ol_flags & PKT_TX_IP_CKSUM) {
1712 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1713 *td_offset |= (tx_offload.l3_len >> 2) <<
1714 ICE_TX_DESC_LEN_IPLEN_S;
1715 } else if (ol_flags & PKT_TX_IPV4) {
1716 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1717 *td_offset |= (tx_offload.l3_len >> 2) <<
1718 ICE_TX_DESC_LEN_IPLEN_S;
1719 } else if (ol_flags & PKT_TX_IPV6) {
1720 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1721 *td_offset |= (tx_offload.l3_len >> 2) <<
1722 ICE_TX_DESC_LEN_IPLEN_S;
1725 if (ol_flags & PKT_TX_TCP_SEG) {
1726 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1727 *td_offset |= (tx_offload.l4_len >> 2) <<
1728 ICE_TX_DESC_LEN_L4_LEN_S;
1732 /* Enable L4 checksum offloads */
1733 switch (ol_flags & PKT_TX_L4_MASK) {
1734 case PKT_TX_TCP_CKSUM:
1735 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1736 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
1737 ICE_TX_DESC_LEN_L4_LEN_S;
1739 case PKT_TX_SCTP_CKSUM:
1740 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1741 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
1742 ICE_TX_DESC_LEN_L4_LEN_S;
1744 case PKT_TX_UDP_CKSUM:
1745 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1746 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
1747 ICE_TX_DESC_LEN_L4_LEN_S;
1755 ice_xmit_cleanup(struct ice_tx_queue *txq)
1757 struct ice_tx_entry *sw_ring = txq->sw_ring;
1758 volatile struct ice_tx_desc *txd = txq->tx_ring;
1759 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1760 uint16_t nb_tx_desc = txq->nb_tx_desc;
1761 uint16_t desc_to_clean_to;
1762 uint16_t nb_tx_to_clean;
1764 /* Determine the last descriptor needing to be cleaned */
1765 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
1766 if (desc_to_clean_to >= nb_tx_desc)
1767 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1769 /* Check to make sure the last descriptor to clean is done */
1770 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1771 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
1772 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
1773 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1774 "(port=%d queue=%d) value=0x%"PRIx64"\n",
1776 txq->port_id, txq->queue_id,
1777 txd[desc_to_clean_to].cmd_type_offset_bsz);
1778 /* Failed to clean any descriptors */
1782 /* Figure out how many descriptors will be cleaned */
1783 if (last_desc_cleaned > desc_to_clean_to)
1784 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1787 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1790 /* The last descriptor to clean is done, so that means all the
1791 * descriptors from the last descriptor that was cleaned
1792 * up to the last descriptor with the RS bit set
1793 * are done. Only reset the threshold descriptor.
1795 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1797 /* Update the txq to reflect the last descriptor that was cleaned */
1798 txq->last_desc_cleaned = desc_to_clean_to;
1799 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
1804 /* Construct the tx flags */
1805 static inline uint64_t
1806 ice_build_ctob(uint32_t td_cmd,
1811 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
1812 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
1813 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
1814 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1815 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
1818 /* Check if the context descriptor is needed for TX offloading */
1819 static inline uint16_t
1820 ice_calc_context_desc(uint64_t flags)
1822 static uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_QINQ;
1824 return (flags & mask) ? 1 : 0;
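/* Example: a TSO packet with nb_segs == 3 therefore consumes
 * 3 + 1 = 4 ring slots (see the nb_used computation in ice_xmit_pkts()),
 * while a packet with neither TSO nor QinQ needs no context descriptor.
 */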
1827 /* set ice TSO context descriptor */
1828 static inline uint64_t
1829 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
1831 uint64_t ctx_desc = 0;
1832 uint32_t cd_cmd, hdr_len, cd_tso_len;
1834 if (!tx_offload.l4_len) {
1835 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1840 * in the case of a non-tunneled packet, outer_l2_len and
1841 * outer_l3_len must be 0.
1843 hdr_len = tx_offload.outer_l2_len +
1844 tx_offload.outer_l3_len +
1849 cd_cmd = ICE_TX_CTX_DESC_TSO;
1850 cd_tso_len = mbuf->pkt_len - hdr_len;
1851 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
1852 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1853 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
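/* Worked example: a 9014-byte TSO packet with l2_len = 14, l3_len = 20
 * and l4_len = 20 (hdr_len = 54, no tunnel headers) and tso_segsz = 1460
 * yields cd_tso_len = 9014 - 54 = 8960 payload bytes, carried in the
 * context descriptor together with the 1460-byte MSS.
 */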
1859 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1861 struct ice_tx_queue *txq;
1862 volatile struct ice_tx_desc *tx_ring;
1863 volatile struct ice_tx_desc *txd;
1864 struct ice_tx_entry *sw_ring;
1865 struct ice_tx_entry *txe, *txn;
1866 struct rte_mbuf *tx_pkt;
1867 struct rte_mbuf *m_seg;
1872 uint32_t td_cmd = 0;
1873 uint32_t td_offset = 0;
1874 uint32_t td_tag = 0;
1876 uint64_t buf_dma_addr;
1878 union ice_tx_offload tx_offload = {0};
1881 sw_ring = txq->sw_ring;
1882 tx_ring = txq->tx_ring;
1883 tx_id = txq->tx_tail;
1884 txe = &sw_ring[tx_id];
1886 /* Check if the descriptor ring needs to be cleaned. */
1887 if (txq->nb_tx_free < txq->tx_free_thresh)
1888 ice_xmit_cleanup(txq);
1890 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1891 tx_pkt = *tx_pkts++;
1894 ol_flags = tx_pkt->ol_flags;
1895 tx_offload.l2_len = tx_pkt->l2_len;
1896 tx_offload.l3_len = tx_pkt->l3_len;
1897 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1898 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1899 tx_offload.l4_len = tx_pkt->l4_len;
1900 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1901 /* Calculate the number of context descriptors needed. */
1902 nb_ctx = ice_calc_context_desc(ol_flags);
1904 /* The number of descriptors that must be allocated for
1905 * a packet equals the number of segments of that packet
1906 * plus the number of context descriptors, if needed.
1908 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1909 tx_last = (uint16_t)(tx_id + nb_used - 1);
1912 if (tx_last >= txq->nb_tx_desc)
1913 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1915 if (nb_used > txq->nb_tx_free) {
1916 if (ice_xmit_cleanup(txq) != 0) {
1921 if (unlikely(nb_used > txq->tx_rs_thresh)) {
1922 while (nb_used > txq->nb_tx_free) {
1923 if (ice_xmit_cleanup(txq) != 0) {
1932 /* Descriptor based VLAN insertion */
1933 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
1934 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
1935 td_tag = tx_pkt->vlan_tci;
1938 /* Enable checksum offloading */
1939 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
1940 ice_txd_enable_checksum(ol_flags, &td_cmd,
1941 &td_offset, tx_offload);
1945 /* Setup TX context descriptor if required */
1946 volatile struct ice_tx_ctx_desc *ctx_txd =
1947 (volatile struct ice_tx_ctx_desc *)
1949 uint16_t cd_l2tag2 = 0;
1950 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
1952 txn = &sw_ring[txe->next_id];
1953 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1955 rte_pktmbuf_free_seg(txe->mbuf);
1959 if (ol_flags & PKT_TX_TCP_SEG)
1960 cd_type_cmd_tso_mss |=
1961 ice_set_tso_ctx(tx_pkt, tx_offload);
1963 /* TX context descriptor based double VLAN insert */
1964 if (ol_flags & PKT_TX_QINQ) {
1965 cd_l2tag2 = tx_pkt->vlan_tci_outer;
1966 cd_type_cmd_tso_mss |=
1967 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
1968 ICE_TXD_CTX_QW1_CMD_S);
1970 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
1972 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1974 txe->last_id = tx_last;
1975 tx_id = txe->next_id;
1981 txd = &tx_ring[tx_id];
1982 txn = &sw_ring[txe->next_id];
1985 rte_pktmbuf_free_seg(txe->mbuf);
1988 /* Setup TX Descriptor */
1989 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1990 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
1991 txd->cmd_type_offset_bsz =
1992 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
1993 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
1994 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
1995 ((uint64_t)m_seg->data_len <<
1996 ICE_TXD_QW1_TX_BUF_SZ_S) |
1997 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
1999 txe->last_id = tx_last;
2000 tx_id = txe->next_id;
2002 m_seg = m_seg->next;
2005 /* fill the last descriptor with End of Packet (EOP) bit */
2006 td_cmd |= ICE_TX_DESC_CMD_EOP;
2007 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2008 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2010 /* set RS bit on the last descriptor of one packet */
2011 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2012 PMD_TX_FREE_LOG(DEBUG,
2013 "Setting RS bit on TXD id="
2014 "%4u (port=%d queue=%d)",
2015 tx_last, txq->port_id, txq->queue_id);
2017 td_cmd |= ICE_TX_DESC_CMD_RS;
2019 /* Update txq RS bit counters */
2020 txq->nb_tx_used = 0;
2022 txd->cmd_type_offset_bsz |=
2023 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2029 /* update Tail register */
2030 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2031 txq->tx_tail = tx_id;
2036 static inline int __attribute__((always_inline))
2037 ice_tx_free_bufs(struct ice_tx_queue *txq)
2039 struct ice_tx_entry *txep;
2042 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2043 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2044 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2047 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2049 for (i = 0; i < txq->tx_rs_thresh; i++)
2050 rte_prefetch0((txep + i)->mbuf);
2052 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2053 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2054 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2058 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2059 rte_pktmbuf_free_seg(txep->mbuf);
2064 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2065 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2066 if (txq->tx_next_dd >= txq->nb_tx_desc)
2067 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2069 return txq->tx_rs_thresh;
2072 /* Populate 4 descriptors with data from 4 mbufs */
2074 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2079 for (i = 0; i < 4; i++, txdp++, pkts++) {
2080 dma_addr = rte_mbuf_data_iova(*pkts);
2081 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2082 txdp->cmd_type_offset_bsz =
2083 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2084 (*pkts)->data_len, 0);
2088 /* Populate 1 descriptor with data from 1 mbuf */
2090 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2094 dma_addr = rte_mbuf_data_iova(*pkts);
2095 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2096 txdp->cmd_type_offset_bsz =
2097 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2098 (*pkts)->data_len, 0);
2102 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2105 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2106 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2107 const int N_PER_LOOP = 4;
2108 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2109 int mainpart, leftover;
2113 * Process most of the packets in chunks of N pkts. Any
2114 * leftover packets will get processed one at a time.
2116 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2117 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
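/* Example: nb_pkts = 7 gives mainpart = 4 and leftover = 3, so one
 * four-descriptor tx4() pass is followed by three tx1() calls.
 */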
2118 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2119 /* Copy N mbuf pointers to the S/W ring */
2120 for (j = 0; j < N_PER_LOOP; ++j)
2121 (txep + i + j)->mbuf = *(pkts + i + j);
2122 tx4(txdp + i, pkts + i);
2125 if (unlikely(leftover > 0)) {
2126 for (i = 0; i < leftover; ++i) {
2127 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2128 tx1(txdp + mainpart + i, pkts + mainpart + i);
2133 static inline uint16_t
2134 tx_xmit_pkts(struct ice_tx_queue *txq,
2135 struct rte_mbuf **tx_pkts,
2138 volatile struct ice_tx_desc *txr = txq->tx_ring;
2142 * Begin scanning the H/W ring for done descriptors when the number
2143 * of available descriptors drops below tx_free_thresh. For each done
2144 * descriptor, free the associated buffer.
2146 if (txq->nb_tx_free < txq->tx_free_thresh)
2147 ice_tx_free_bufs(txq);
2149 /* Use only the available descriptors */
2150 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2151 if (unlikely(!nb_pkts))
2154 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2155 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2156 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2157 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2158 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2159 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2161 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2165 /* Fill hardware descriptor ring with mbuf data */
2166 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2167 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2169 /* Determine if the RS bit needs to be set */
2170 if (txq->tx_tail > txq->tx_next_rs) {
2171 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2172 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2175 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2176 if (txq->tx_next_rs >= txq->nb_tx_desc)
2177 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2180 if (txq->tx_tail >= txq->nb_tx_desc)
2183 /* Update the tx tail register */
2185 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2191 ice_xmit_pkts_simple(void *tx_queue,
2192 struct rte_mbuf **tx_pkts,
2197 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2198 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2202 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2205 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2206 &tx_pkts[nb_tx], num);
2207 nb_tx = (uint16_t)(nb_tx + ret);
2208 nb_pkts = (uint16_t)(nb_pkts - ret);
2216 void __attribute__((cold))
2217 ice_set_rx_function(struct rte_eth_dev *dev)
2219 PMD_INIT_FUNC_TRACE();
2220 struct ice_adapter *ad =
2221 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2223 if (dev->data->scattered_rx) {
2224 /* Set the non-LRO scattered function */
2226 "Using a Scattered function on port %d.",
2227 dev->data->port_id);
2228 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2229 } else if (ad->rx_bulk_alloc_allowed) {
2231 "Rx Burst Bulk Alloc Preconditions are "
2232 "satisfied. Rx Burst Bulk Alloc function "
2233 "will be used on port %d.",
2234 dev->data->port_id);
2235 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2238 "Rx Burst Bulk Alloc Preconditions are not "
2239 "satisfied, Normal Rx will be used on port %d.",
2240 dev->data->port_id);
2241 dev->rx_pkt_burst = ice_recv_pkts;
2245 /*********************************************************************
2249 **********************************************************************/
2250 /* The default values of TSO MSS */
2251 #define ICE_MIN_TSO_MSS 64
2252 #define ICE_MAX_TSO_MSS 9728
2253 #define ICE_MAX_TSO_FRAME_SIZE 262144
2255 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2262 for (i = 0; i < nb_pkts; i++) {
2263 m = tx_pkts[i];
2264 ol_flags = m->ol_flags;
2266 if (ol_flags & PKT_TX_TCP_SEG &&
2267 (m->tso_segsz < ICE_MIN_TSO_MSS ||
2268 m->tso_segsz > ICE_MAX_TSO_MSS ||
2269 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
2270 /**
2271 * An MSS outside this range is considered malicious; reject the packet.
2272 */
2273 rte_errno = EINVAL;
2274 return i;
2275 }
2277 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2278 ret = rte_validate_tx_offload(m);
2279 if (ret != 0) {
2280 rte_errno = -ret;
2281 return i;
2282 }
2283 #endif
2284 ret = rte_net_intel_cksum_prepare(m);
2285 if (ret != 0) {
2286 rte_errno = -ret;
2287 return i;
2288 }
2289 }
2291 return i;
2292 }
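/* Editor's illustrative note: applications are expected to run
 * rte_eth_tx_prepare() before rte_eth_tx_burst() when using TSO or
 * checksum offloads; it dispatches to ice_prep_pkts(), which returns the
 * index of the first failing mbuf, e.g.:
 *
 *	uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
 *	if (nb_ok < n)
 *		handle_bad_pkt(pkts[nb_ok], rte_errno);
 *
 * (handle_bad_pkt() is a hypothetical application callback.)
 * rte_net_intel_cksum_prepare() fills in the pseudo-header checksum that
 * the hardware checksum offload expects.
 */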
2293 void __attribute__((cold))
2294 ice_set_tx_function(struct rte_eth_dev *dev)
2295 {
2296 struct ice_adapter *ad =
2297 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2299 if (ad->tx_simple_allowed) {
2300 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
2301 dev->tx_pkt_burst = ice_xmit_pkts_simple;
2302 dev->tx_pkt_prepare = NULL;
2303 } else {
2304 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
2305 dev->tx_pkt_burst = ice_xmit_pkts;
2306 dev->tx_pkt_prepare = ice_prep_pkts;
2307 }
2308 }
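/* Editor's illustrative note: tx_pkt_prepare can be NULL on the simple
 * path because, by assumption, tx_simple_allowed is only set when no Tx
 * offloads are enabled on the queue (and tx_rs_thresh is large enough),
 * so there is no TSO/checksum state to validate and every mbuf maps to a
 * single data descriptor.
 */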
2310 /* The hardware datasheet describes the meaning of each value in detail.
2311 *
2312 * @note: fix ice_dev_supported_ptypes_get() if anything changes here.
2313 */
2314 static inline uint32_t
2315 ice_get_default_pkt_type(uint16_t ptype)
2317 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
2318 __rte_cache_aligned = {
2319 /* L2 types */
2320 /* [0] reserved */
2321 [1] = RTE_PTYPE_L2_ETHER,
2322 /* [2] - [5] reserved */
2323 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2324 /* [7] - [10] reserved */
2325 [11] = RTE_PTYPE_L2_ETHER_ARP,
2326 /* [12] - [21] reserved */
2328 /* Non tunneled IPv4 */
2329 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2330 RTE_PTYPE_L4_FRAG,
2331 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2332 RTE_PTYPE_L4_NONFRAG,
2333 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2334 RTE_PTYPE_L4_UDP,
2335 /* [25] reserved */
2336 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2337 RTE_PTYPE_L4_TCP,
2338 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2339 RTE_PTYPE_L4_SCTP,
2340 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2341 RTE_PTYPE_L4_ICMP,
2343 /* IPv4 --> IPv4 */
2344 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2345 RTE_PTYPE_TUNNEL_IP |
2346 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2347 RTE_PTYPE_INNER_L4_FRAG,
2348 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2349 RTE_PTYPE_TUNNEL_IP |
2350 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2351 RTE_PTYPE_INNER_L4_NONFRAG,
2352 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2353 RTE_PTYPE_TUNNEL_IP |
2354 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2355 RTE_PTYPE_INNER_L4_UDP,
2356 /* [32] reserved */
2357 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2358 RTE_PTYPE_TUNNEL_IP |
2359 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2360 RTE_PTYPE_INNER_L4_TCP,
2361 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2362 RTE_PTYPE_TUNNEL_IP |
2363 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2364 RTE_PTYPE_INNER_L4_SCTP,
2365 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2366 RTE_PTYPE_TUNNEL_IP |
2367 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2368 RTE_PTYPE_INNER_L4_ICMP,
2370 /* IPv4 --> IPv6 */
2371 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2372 RTE_PTYPE_TUNNEL_IP |
2373 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2374 RTE_PTYPE_INNER_L4_FRAG,
2375 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2376 RTE_PTYPE_TUNNEL_IP |
2377 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2378 RTE_PTYPE_INNER_L4_NONFRAG,
2379 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2380 RTE_PTYPE_TUNNEL_IP |
2381 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2382 RTE_PTYPE_INNER_L4_UDP,
2383 /* [39] reserved */
2384 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2385 RTE_PTYPE_TUNNEL_IP |
2386 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2387 RTE_PTYPE_INNER_L4_TCP,
2388 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2389 RTE_PTYPE_TUNNEL_IP |
2390 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2391 RTE_PTYPE_INNER_L4_SCTP,
2392 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2393 RTE_PTYPE_TUNNEL_IP |
2394 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2395 RTE_PTYPE_INNER_L4_ICMP,
2397 /* IPv4 --> GRE/Teredo/VXLAN */
2398 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2399 RTE_PTYPE_TUNNEL_GRENAT,
2401 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2402 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2403 RTE_PTYPE_TUNNEL_GRENAT |
2404 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2405 RTE_PTYPE_INNER_L4_FRAG,
2406 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2407 RTE_PTYPE_TUNNEL_GRENAT |
2408 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2409 RTE_PTYPE_INNER_L4_NONFRAG,
2410 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2411 RTE_PTYPE_TUNNEL_GRENAT |
2412 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2413 RTE_PTYPE_INNER_L4_UDP,
2414 /* [47] reserved */
2415 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2416 RTE_PTYPE_TUNNEL_GRENAT |
2417 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2418 RTE_PTYPE_INNER_L4_TCP,
2419 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2420 RTE_PTYPE_TUNNEL_GRENAT |
2421 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2422 RTE_PTYPE_INNER_L4_SCTP,
2423 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2424 RTE_PTYPE_TUNNEL_GRENAT |
2425 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2426 RTE_PTYPE_INNER_L4_ICMP,
2428 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2429 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2430 RTE_PTYPE_TUNNEL_GRENAT |
2431 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2432 RTE_PTYPE_INNER_L4_FRAG,
2433 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2434 RTE_PTYPE_TUNNEL_GRENAT |
2435 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2436 RTE_PTYPE_INNER_L4_NONFRAG,
2437 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2438 RTE_PTYPE_TUNNEL_GRENAT |
2439 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2440 RTE_PTYPE_INNER_L4_UDP,
2441 /* [54] reserved */
2442 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2443 RTE_PTYPE_TUNNEL_GRENAT |
2444 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2445 RTE_PTYPE_INNER_L4_TCP,
2446 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2447 RTE_PTYPE_TUNNEL_GRENAT |
2448 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2449 RTE_PTYPE_INNER_L4_SCTP,
2450 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2451 RTE_PTYPE_TUNNEL_GRENAT |
2452 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2453 RTE_PTYPE_INNER_L4_ICMP,
2455 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2456 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2457 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2459 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2460 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2461 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2462 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2463 RTE_PTYPE_INNER_L4_FRAG,
2464 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2465 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2466 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2467 RTE_PTYPE_INNER_L4_NONFRAG,
2468 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2469 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2470 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2471 RTE_PTYPE_INNER_L4_UDP,
2472 /* [62] reserved */
2473 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2474 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2475 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2476 RTE_PTYPE_INNER_L4_TCP,
2477 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2478 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2479 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2480 RTE_PTYPE_INNER_L4_SCTP,
2481 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2482 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2483 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2484 RTE_PTYPE_INNER_L4_ICMP,
2486 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2487 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2488 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2489 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2490 RTE_PTYPE_INNER_L4_FRAG,
2491 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2492 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2493 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2494 RTE_PTYPE_INNER_L4_NONFRAG,
2495 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2496 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2497 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2498 RTE_PTYPE_INNER_L4_UDP,
2499 /* [69] reserved */
2500 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2501 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2502 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2503 RTE_PTYPE_INNER_L4_TCP,
2504 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2505 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2506 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2507 RTE_PTYPE_INNER_L4_SCTP,
2508 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2509 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2510 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2511 RTE_PTYPE_INNER_L4_ICMP,
2513 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2514 [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2515 RTE_PTYPE_TUNNEL_GRENAT |
2516 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2518 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2519 [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2520 RTE_PTYPE_TUNNEL_GRENAT |
2521 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2522 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2523 RTE_PTYPE_INNER_L4_FRAG,
2524 [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2525 RTE_PTYPE_TUNNEL_GRENAT |
2526 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2527 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2528 RTE_PTYPE_INNER_L4_NONFRAG,
2529 [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2530 RTE_PTYPE_TUNNEL_GRENAT |
2531 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2532 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2533 RTE_PTYPE_INNER_L4_UDP,
2534 /* [77] reserved */
2535 [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2536 RTE_PTYPE_TUNNEL_GRENAT |
2537 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2538 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2539 RTE_PTYPE_INNER_L4_TCP,
2540 [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2541 RTE_PTYPE_TUNNEL_GRENAT |
2542 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2543 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2544 RTE_PTYPE_INNER_L4_SCTP,
2545 [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2546 RTE_PTYPE_TUNNEL_GRENAT |
2547 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2548 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2549 RTE_PTYPE_INNER_L4_ICMP,
2551 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2552 [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2553 RTE_PTYPE_TUNNEL_GRENAT |
2554 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2555 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2556 RTE_PTYPE_INNER_L4_FRAG,
2557 [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2558 RTE_PTYPE_TUNNEL_GRENAT |
2559 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2560 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2561 RTE_PTYPE_INNER_L4_NONFRAG,
2562 [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2563 RTE_PTYPE_TUNNEL_GRENAT |
2564 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2565 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2566 RTE_PTYPE_INNER_L4_UDP,
2567 /* [84] reserved */
2568 [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2569 RTE_PTYPE_TUNNEL_GRENAT |
2570 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2571 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2572 RTE_PTYPE_INNER_L4_TCP,
2573 [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2574 RTE_PTYPE_TUNNEL_GRENAT |
2575 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2576 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2577 RTE_PTYPE_INNER_L4_SCTP,
2578 [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2579 RTE_PTYPE_TUNNEL_GRENAT |
2580 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2581 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2582 RTE_PTYPE_INNER_L4_ICMP,
2584 /* Non tunneled IPv6 */
2585 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2586 RTE_PTYPE_L4_FRAG,
2587 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2588 RTE_PTYPE_L4_NONFRAG,
2589 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2590 RTE_PTYPE_L4_UDP,
2591 /* [91] reserved */
2592 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2593 RTE_PTYPE_L4_TCP,
2594 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2595 RTE_PTYPE_L4_SCTP,
2596 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2597 RTE_PTYPE_L4_ICMP,
2599 /* IPv6 --> IPv4 */
2600 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2601 RTE_PTYPE_TUNNEL_IP |
2602 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2603 RTE_PTYPE_INNER_L4_FRAG,
2604 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2605 RTE_PTYPE_TUNNEL_IP |
2606 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2607 RTE_PTYPE_INNER_L4_NONFRAG,
2608 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2609 RTE_PTYPE_TUNNEL_IP |
2610 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2611 RTE_PTYPE_INNER_L4_UDP,
2612 /* [98] reserved */
2613 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2614 RTE_PTYPE_TUNNEL_IP |
2615 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2616 RTE_PTYPE_INNER_L4_TCP,
2617 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2618 RTE_PTYPE_TUNNEL_IP |
2619 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2620 RTE_PTYPE_INNER_L4_SCTP,
2621 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2622 RTE_PTYPE_TUNNEL_IP |
2623 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2624 RTE_PTYPE_INNER_L4_ICMP,
2626 /* IPv6 --> IPv6 */
2627 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2628 RTE_PTYPE_TUNNEL_IP |
2629 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2630 RTE_PTYPE_INNER_L4_FRAG,
2631 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2632 RTE_PTYPE_TUNNEL_IP |
2633 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2634 RTE_PTYPE_INNER_L4_NONFRAG,
2635 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2636 RTE_PTYPE_TUNNEL_IP |
2637 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2638 RTE_PTYPE_INNER_L4_UDP,
2639 /* [105] reserved */
2640 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2641 RTE_PTYPE_TUNNEL_IP |
2642 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2643 RTE_PTYPE_INNER_L4_TCP,
2644 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2645 RTE_PTYPE_TUNNEL_IP |
2646 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2647 RTE_PTYPE_INNER_L4_SCTP,
2648 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2649 RTE_PTYPE_TUNNEL_IP |
2650 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2651 RTE_PTYPE_INNER_L4_ICMP,
2653 /* IPv6 --> GRE/Teredo/VXLAN */
2654 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2655 RTE_PTYPE_TUNNEL_GRENAT,
2657 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2658 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2659 RTE_PTYPE_TUNNEL_GRENAT |
2660 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2661 RTE_PTYPE_INNER_L4_FRAG,
2662 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2663 RTE_PTYPE_TUNNEL_GRENAT |
2664 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2665 RTE_PTYPE_INNER_L4_NONFRAG,
2666 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2667 RTE_PTYPE_TUNNEL_GRENAT |
2668 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2669 RTE_PTYPE_INNER_L4_UDP,
2670 /* [113] reserved */
2671 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2672 RTE_PTYPE_TUNNEL_GRENAT |
2673 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2674 RTE_PTYPE_INNER_L4_TCP,
2675 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2676 RTE_PTYPE_TUNNEL_GRENAT |
2677 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2678 RTE_PTYPE_INNER_L4_SCTP,
2679 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2680 RTE_PTYPE_TUNNEL_GRENAT |
2681 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2682 RTE_PTYPE_INNER_L4_ICMP,
2684 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2685 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2686 RTE_PTYPE_TUNNEL_GRENAT |
2687 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2688 RTE_PTYPE_INNER_L4_FRAG,
2689 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2690 RTE_PTYPE_TUNNEL_GRENAT |
2691 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2692 RTE_PTYPE_INNER_L4_NONFRAG,
2693 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2694 RTE_PTYPE_TUNNEL_GRENAT |
2695 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2696 RTE_PTYPE_INNER_L4_UDP,
2697 /* [120] reserved */
2698 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2699 RTE_PTYPE_TUNNEL_GRENAT |
2700 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2701 RTE_PTYPE_INNER_L4_TCP,
2702 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2703 RTE_PTYPE_TUNNEL_GRENAT |
2704 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2705 RTE_PTYPE_INNER_L4_SCTP,
2706 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2707 RTE_PTYPE_TUNNEL_GRENAT |
2708 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2709 RTE_PTYPE_INNER_L4_ICMP,
2711 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2712 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2713 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2715 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2716 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2717 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2718 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2719 RTE_PTYPE_INNER_L4_FRAG,
2720 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2721 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2722 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2723 RTE_PTYPE_INNER_L4_NONFRAG,
2724 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2725 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2726 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2727 RTE_PTYPE_INNER_L4_UDP,
2728 /* [128] reserved */
2729 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2730 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2731 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2732 RTE_PTYPE_INNER_L4_TCP,
2733 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2734 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2735 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2736 RTE_PTYPE_INNER_L4_SCTP,
2737 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2738 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2739 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2740 RTE_PTYPE_INNER_L4_ICMP,
2742 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2743 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2744 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2745 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2746 RTE_PTYPE_INNER_L4_FRAG,
2747 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2748 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2749 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2750 RTE_PTYPE_INNER_L4_NONFRAG,
2751 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2752 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2753 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2754 RTE_PTYPE_INNER_L4_UDP,
2755 /* [135] reserved */
2756 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2757 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2758 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2759 RTE_PTYPE_INNER_L4_TCP,
2760 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2761 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2762 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2763 RTE_PTYPE_INNER_L4_SCTP,
2764 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2765 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2766 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2767 RTE_PTYPE_INNER_L4_ICMP,
2769 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2770 [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2771 RTE_PTYPE_TUNNEL_GRENAT |
2772 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2774 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2775 [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2776 RTE_PTYPE_TUNNEL_GRENAT |
2777 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2778 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2779 RTE_PTYPE_INNER_L4_FRAG,
2780 [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2781 RTE_PTYPE_TUNNEL_GRENAT |
2782 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2783 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2784 RTE_PTYPE_INNER_L4_NONFRAG,
2785 [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2786 RTE_PTYPE_TUNNEL_GRENAT |
2787 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2788 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2789 RTE_PTYPE_INNER_L4_UDP,
2790 /* [143] reserved */
2791 [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2792 RTE_PTYPE_TUNNEL_GRENAT |
2793 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2794 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2795 RTE_PTYPE_INNER_L4_TCP,
2796 [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2797 RTE_PTYPE_TUNNEL_GRENAT |
2798 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2799 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2800 RTE_PTYPE_INNER_L4_SCTP,
2801 [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2802 RTE_PTYPE_TUNNEL_GRENAT |
2803 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2804 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2805 RTE_PTYPE_INNER_L4_ICMP,
2807 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2808 [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2809 RTE_PTYPE_TUNNEL_GRENAT |
2810 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2811 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2812 RTE_PTYPE_INNER_L4_FRAG,
2813 [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2814 RTE_PTYPE_TUNNEL_GRENAT |
2815 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2816 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2817 RTE_PTYPE_INNER_L4_NONFRAG,
2818 [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2819 RTE_PTYPE_TUNNEL_GRENAT |
2820 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2821 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2822 RTE_PTYPE_INNER_L4_UDP,
2823 /* [150] reserved */
2824 [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2825 RTE_PTYPE_TUNNEL_GRENAT |
2826 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2827 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2828 RTE_PTYPE_INNER_L4_TCP,
2829 [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2830 RTE_PTYPE_TUNNEL_GRENAT |
2831 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2832 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2833 RTE_PTYPE_INNER_L4_SCTP,
2834 [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2835 RTE_PTYPE_TUNNEL_GRENAT |
2836 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2837 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2838 RTE_PTYPE_INNER_L4_ICMP,
2839 /* [154] - [255] reserved */
2840 [256] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2841 RTE_PTYPE_TUNNEL_GTPC,
2842 [257] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2843 RTE_PTYPE_TUNNEL_GTPC,
2844 [258] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2845 RTE_PTYPE_TUNNEL_GTPU,
2846 [259] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2847 RTE_PTYPE_TUNNEL_GTPU,
2848 /* [260] - [263] reserved */
2849 [264] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2850 RTE_PTYPE_TUNNEL_GTPC,
2851 [265] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2852 RTE_PTYPE_TUNNEL_GTPC,
2853 [266] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2854 RTE_PTYPE_TUNNEL_GTPU,
2855 [267] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2856 RTE_PTYPE_TUNNEL_GTPU,
2858 /* All others reserved */
2859 };
2861 return type_table[ptype];
2862 }
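/* Editor's illustrative note: the Rx descriptor carries a hardware ptype
 * index (up to ICE_MAX_PKT_TYPE entries) that this table expands into the
 * RTE_PTYPE_* bit-field exposed via mbuf->packet_type. E.g. hardware
 * ptype 26 yields RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_L4_TCP, which an application can test with
 * (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP.
 */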
2864 void __attribute__((cold))
2865 ice_set_default_ptype_table(struct rte_eth_dev *dev)
2866 {
2867 struct ice_adapter *ad =
2868 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2869 int i;
2871 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
2872 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
2873 }
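/* Editor's illustrative note: caching the expanded table in the adapter
 * lets the hot Rx path translate a hardware ptype with a single array
 * load, e.g. mb->packet_type = ad->ptype_tbl[hw_ptype], instead of
 * calling ice_get_default_pkt_type() per packet.
 */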