1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
10 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
14 PKT_TX_OUTER_IP_CKSUM)
17 static enum ice_status
18 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
20 struct ice_vsi *vsi = rxq->vsi;
21 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
22 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
23 struct ice_rlan_ctx rx_ctx;
25 uint16_t buf_size, len;
26 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
27 uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
30 /* Set the buffer size, as header split is disabled. */
31 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
32 RTE_PKTMBUF_HEADROOM);
34 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
35 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
36 rxq->max_pkt_len = RTE_MIN(len,
37 dev->data->dev_conf.rxmode.max_rx_pkt_len);
39 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
40 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
41 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
42 PMD_DRV_LOG(ERR, "maximum packet length must "
43 "be larger than %u and smaller than %u, "
44 "as jumbo frame is enabled",
45 (uint32_t)RTE_ETHER_MAX_LEN,
46 (uint32_t)ICE_FRAME_SIZE_MAX);
50 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
51 rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
52 PMD_DRV_LOG(ERR, "maximum packet length must be "
53 "larger than %u and smaller than %u, "
54 "as jumbo frame is disabled",
55 (uint32_t)RTE_ETHER_MIN_LEN,
56 (uint32_t)RTE_ETHER_MAX_LEN);
61 memset(&rx_ctx, 0, sizeof(rx_ctx));
63 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
64 rx_ctx.qlen = rxq->nb_rx_desc;
65 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
66 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
67 rx_ctx.dtype = 0; /* No Header Split mode */
68 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
69 rx_ctx.dsize = 1; /* 32B descriptors */
71 rx_ctx.rxmax = rxq->max_pkt_len;
72 /* TPH: Transaction Layer Packet (TLP) processing hints */
73 rx_ctx.tphrdesc_ena = 1;
74 rx_ctx.tphwdesc_ena = 1;
75 rx_ctx.tphdata_ena = 1;
76 rx_ctx.tphhead_ena = 1;
77 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
78 * When the number of free descriptors goes below the lrxqthresh,
79 * an immediate interrupt is triggered.
81 rx_ctx.lrxqthresh = 2;
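/* With lrxqthresh = 2, the threshold is 2 * 64 = 128 free descriptors. */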
82 /* Default: use 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
85 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
87 /* Enable Flexible Descriptors in the queue context which
88 * allows this driver to select a specific receive descriptor format
90 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
91 QRXFLXP_CNTXT_RXDID_IDX_M;
93 /* Increase the context priority to pick up the profile ID;
94 * the default is 0x01; set it to 0x03 to ensure the profile
95 * is programmed even if the previous context has the same priority
97 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
98 QRXFLXP_CNTXT_RXDID_PRIO_M;
100 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
102 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
104 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
108 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
110 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
115 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
116 RTE_PKTMBUF_HEADROOM);
118 /* Check if scattered RX needs to be used. */
119 if (rxq->max_pkt_len > buf_size)
120 dev->data->scattered_rx = 1;
122 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
124 /* Init the Rx tail register */
125 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
130 /* Allocate mbufs for all descriptors in rx queue */
132 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
134 struct ice_rx_entry *rxe = rxq->sw_ring;
138 for (i = 0; i < rxq->nb_rx_desc; i++) {
139 volatile union ice_rx_desc *rxd;
140 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
142 if (unlikely(!mbuf)) {
143 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
147 rte_mbuf_refcnt_set(mbuf, 1);
149 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
151 mbuf->port = rxq->port_id;
154 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
156 rxd = &rxq->rx_ring[i];
157 rxd->read.pkt_addr = dma_addr;
158 rxd->read.hdr_addr = 0;
159 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
169 /* Free all mbufs for descriptors in rx queue */
171 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
175 if (!rxq || !rxq->sw_ring) {
176 PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
180 for (i = 0; i < rxq->nb_rx_desc; i++) {
181 if (rxq->sw_ring[i].mbuf) {
182 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
183 rxq->sw_ring[i].mbuf = NULL;
186 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
187 if (rxq->rx_nb_avail == 0)
189 for (i = 0; i < rxq->rx_nb_avail; i++) {
190 struct rte_mbuf *mbuf;
192 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
193 rte_pktmbuf_free_seg(mbuf);
195 rxq->rx_nb_avail = 0;
196 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
200 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
202 rxq->rx_rel_mbufs(rxq);
205 /* Turn an Rx queue on or off
206 * @q_idx: queue index in PF scope
207 * @on: turn on or off the queue
210 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
215 /* QRX_CTRL = QRX_ENA */
216 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
219 if (reg & QRX_CTRL_QENA_STAT_M)
220 return 0; /* Already on, skip */
221 reg |= QRX_CTRL_QENA_REQ_M;
223 if (!(reg & QRX_CTRL_QENA_STAT_M))
224 return 0; /* Already off, skip */
225 reg &= ~QRX_CTRL_QENA_REQ_M;
228 /* Write the register */
229 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
230 /* Check the result. QENA_STAT is expected to follow
231 * QENA_REQ within no more than 10 us.
232 * TODO: need to change the wait counter later
234 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
235 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
236 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
238 if ((reg & QRX_CTRL_QENA_REQ_M) &&
239 (reg & QRX_CTRL_QENA_STAT_M))
242 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
243 !(reg & QRX_CTRL_QENA_STAT_M))
248 /* Check whether the wait timed out */
249 if (j >= ICE_CHK_Q_ENA_COUNT) {
250 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
251 (on ? "enable" : "disable"), q_idx);
259 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
260 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
262 ice_check_rx_burst_bulk_alloc_preconditions
263 (__rte_unused struct ice_rx_queue *rxq)
268 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
269 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
270 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
271 "rxq->rx_free_thresh=%d, "
272 "ICE_RX_MAX_BURST=%d",
273 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
275 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
276 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
277 "rxq->rx_free_thresh=%d, "
278 "rxq->nb_rx_desc=%d",
279 rxq->rx_free_thresh, rxq->nb_rx_desc);
281 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
282 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
283 "rxq->nb_rx_desc=%d, "
284 "rxq->rx_free_thresh=%d",
285 rxq->nb_rx_desc, rxq->rx_free_thresh);
295 /* reset fields in ice_rx_queue back to default */
297 ice_reset_rx_queue(struct ice_rx_queue *rxq)
303 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
307 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
308 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
309 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
311 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
312 len = rxq->nb_rx_desc;
314 for (i = 0; i < len * sizeof(union ice_rx_desc); i++)
315 ((volatile char *)rxq->rx_ring)[i] = 0;
317 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
318 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
319 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
320 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
322 rxq->rx_nb_avail = 0;
323 rxq->rx_next_avail = 0;
324 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
325 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
329 rxq->pkt_first_seg = NULL;
330 rxq->pkt_last_seg = NULL;
332 rxq->rxrearm_start = 0;
337 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
339 struct ice_rx_queue *rxq;
341 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
343 PMD_INIT_FUNC_TRACE();
345 if (rx_queue_id >= dev->data->nb_rx_queues) {
346 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
347 rx_queue_id, dev->data->nb_rx_queues);
351 rxq = dev->data->rx_queues[rx_queue_id];
352 if (!rxq || !rxq->q_set) {
353 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
358 err = ice_program_hw_rx_queue(rxq);
360 PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
365 err = ice_alloc_rx_queue_mbufs(rxq);
367 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
373 /* Init the RX tail register. */
374 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
376 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
378 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
381 ice_rx_queue_release_mbufs(rxq);
382 ice_reset_rx_queue(rxq);
386 dev->data->rx_queue_state[rx_queue_id] =
387 RTE_ETH_QUEUE_STATE_STARTED;
393 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
395 struct ice_rx_queue *rxq;
397 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
399 if (rx_queue_id < dev->data->nb_rx_queues) {
400 rxq = dev->data->rx_queues[rx_queue_id];
402 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
404 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
408 ice_rx_queue_release_mbufs(rxq);
409 ice_reset_rx_queue(rxq);
410 dev->data->rx_queue_state[rx_queue_id] =
411 RTE_ETH_QUEUE_STATE_STOPPED;
418 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
420 struct ice_tx_queue *txq;
424 struct ice_aqc_add_tx_qgrp txq_elem;
425 struct ice_tlan_ctx tx_ctx;
427 PMD_INIT_FUNC_TRACE();
429 if (tx_queue_id >= dev->data->nb_tx_queues) {
430 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
431 tx_queue_id, dev->data->nb_tx_queues);
435 txq = dev->data->tx_queues[tx_queue_id];
436 if (!txq || !txq->q_set) {
437 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
443 hw = ICE_VSI_TO_HW(vsi);
445 memset(&txq_elem, 0, sizeof(txq_elem));
446 memset(&tx_ctx, 0, sizeof(tx_ctx));
447 txq_elem.num_txqs = 1;
448 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
450 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
451 tx_ctx.qlen = txq->nb_tx_desc;
452 tx_ctx.pf_num = hw->pf_id;
453 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
454 tx_ctx.src_vsi = vsi->vsi_id;
455 tx_ctx.port_num = hw->port_info->lport;
456 tx_ctx.tso_ena = 1; /* tso enable */
457 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
458 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
460 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
463 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
465 /* Init the Tx tail register */
466 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
468 /* FIXME: we assume TC is always 0 here */
469 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
470 &txq_elem, sizeof(txq_elem), NULL);
472 PMD_DRV_LOG(ERR, "Failed to add lan txq");
475 /* store the schedule node id */
476 txq->q_teid = txq_elem.txqs[0].q_teid;
478 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
482 /* Free all mbufs for descriptors in tx queue */
484 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
488 if (!txq || !txq->sw_ring) {
489 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
493 for (i = 0; i < txq->nb_tx_desc; i++) {
494 if (txq->sw_ring[i].mbuf) {
495 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
496 txq->sw_ring[i].mbuf = NULL;
501 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
503 txq->tx_rel_mbufs(txq);
507 ice_reset_tx_queue(struct ice_tx_queue *txq)
509 struct ice_tx_entry *txe;
510 uint16_t i, prev, size;
513 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
518 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
519 for (i = 0; i < size; i++)
520 ((volatile char *)txq->tx_ring)[i] = 0;
522 prev = (uint16_t)(txq->nb_tx_desc - 1);
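/* Start prev at the last index so the next_id links set in the loop below wrap around, making the SW ring a circular list. */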
523 for (i = 0; i < txq->nb_tx_desc; i++) {
524 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
526 txd->cmd_type_offset_bsz =
527 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
530 txe[prev].next_id = i;
534 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
535 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
540 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
541 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
545 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
547 struct ice_tx_queue *txq;
548 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
549 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
550 struct ice_vsi *vsi = pf->main_vsi;
551 enum ice_status status;
554 uint16_t q_handle = tx_queue_id;
556 if (tx_queue_id >= dev->data->nb_tx_queues) {
557 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
558 tx_queue_id, dev->data->nb_tx_queues);
562 txq = dev->data->tx_queues[tx_queue_id];
564 PMD_DRV_LOG(ERR, "TX queue %u is not available",
569 q_ids[0] = txq->reg_idx;
570 q_teids[0] = txq->q_teid;
572 /* FIXME: we assume TC is always 0 here */
573 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
574 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
575 if (status != ICE_SUCCESS) {
576 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
580 ice_tx_queue_release_mbufs(txq);
581 ice_reset_tx_queue(txq);
582 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
588 ice_rx_queue_setup(struct rte_eth_dev *dev,
591 unsigned int socket_id,
592 const struct rte_eth_rxconf *rx_conf,
593 struct rte_mempool *mp)
595 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
596 struct ice_adapter *ad =
597 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
598 struct ice_vsi *vsi = pf->main_vsi;
599 struct ice_rx_queue *rxq;
600 const struct rte_memzone *rz;
603 int use_def_burst_func = 1;
605 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
606 nb_desc > ICE_MAX_RING_DESC ||
607 nb_desc < ICE_MIN_RING_DESC) {
608 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
613 /* Free memory if needed */
614 if (dev->data->rx_queues[queue_idx]) {
615 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
616 dev->data->rx_queues[queue_idx] = NULL;
619 /* Allocate the rx queue data structure */
620 rxq = rte_zmalloc_socket(NULL,
621 sizeof(struct ice_rx_queue),
625 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
626 "rx queue data structure");
630 rxq->nb_rx_desc = nb_desc;
631 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
632 rxq->queue_id = queue_idx;
634 rxq->reg_idx = vsi->base_queue + queue_idx;
635 rxq->port_id = dev->data->port_id;
636 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
637 rxq->crc_len = RTE_ETHER_CRC_LEN;
641 rxq->drop_en = rx_conf->rx_drop_en;
643 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
645 /* Allocate the maximum number of RX ring hardware descriptors. */
646 len = ICE_MAX_RING_DESC;
648 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
650 * Allocating a little more memory because vectorized/bulk_alloc Rx
651 * functions don't check boundaries each time.
653 len += ICE_RX_MAX_BURST;
656 /* Allocate the maximum number of RX ring hardware descriptors. */
657 ring_size = sizeof(union ice_rx_desc) * len;
658 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
659 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
660 ring_size, ICE_RING_BASE_ALIGN,
663 ice_rx_queue_release(rxq);
664 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
668 /* Zero all the descriptors in the ring. */
669 memset(rz->addr, 0, ring_size);
671 rxq->rx_ring_dma = rz->iova;
672 rxq->rx_ring = rz->addr;
674 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
675 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
680 /* Allocate the software ring. */
681 rxq->sw_ring = rte_zmalloc_socket(NULL,
682 sizeof(struct ice_rx_entry) * len,
686 ice_rx_queue_release(rxq);
687 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
691 ice_reset_rx_queue(rxq);
693 dev->data->rx_queues[queue_idx] = rxq;
694 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
696 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
698 if (!use_def_burst_func) {
699 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
700 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
701 "satisfied. Rx Burst Bulk Alloc function will be "
702 "used on port=%d, queue=%d.",
703 rxq->port_id, rxq->queue_id);
704 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
706 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
707 "not satisfied, Scattered Rx is requested, "
708 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
709 "not enabled on port=%d, queue=%d.",
710 rxq->port_id, rxq->queue_id);
711 ad->rx_bulk_alloc_allowed = false;
718 ice_rx_queue_release(void *rxq)
720 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
723 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
727 ice_rx_queue_release_mbufs(q);
728 rte_free(q->sw_ring);
733 ice_tx_queue_setup(struct rte_eth_dev *dev,
736 unsigned int socket_id,
737 const struct rte_eth_txconf *tx_conf)
739 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
740 struct ice_vsi *vsi = pf->main_vsi;
741 struct ice_tx_queue *txq;
742 const struct rte_memzone *tz;
744 uint16_t tx_rs_thresh, tx_free_thresh;
747 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
749 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
750 nb_desc > ICE_MAX_RING_DESC ||
751 nb_desc < ICE_MIN_RING_DESC) {
752 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
758 * The following two parameters control the setting of the RS bit on
759 * transmit descriptors. TX descriptors will have their RS bit set
760 * after txq->tx_rs_thresh descriptors have been used. The TX
761 * descriptor ring will be cleaned after txq->tx_free_thresh
762 * descriptors are used or if the number of descriptors required to
763 * transmit a packet is greater than the number of free TX descriptors.
765 * The following constraints must be satisfied:
766 * - tx_rs_thresh must be greater than 0.
767 * - tx_rs_thresh must be less than the size of the ring minus 2.
768 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
769 * - tx_rs_thresh must be a divisor of the ring size.
770 * - tx_free_thresh must be greater than 0.
771 * - tx_free_thresh must be less than the size of the ring minus 3.
772 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
774 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
775 * race condition, hence the maximum threshold constraints. When set
776 * to zero use default values.
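 * For example, nb_desc = 1024 with tx_rs_thresh = 32 and tx_free_thresh = 32
 * satisfies every constraint above: 32 > 0, 32 < 1022, 32 <= 32,
 * 1024 % 32 == 0, 32 < 1021 and 32 + 32 <= 1024.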
778 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
779 tx_conf->tx_free_thresh :
780 ICE_DEFAULT_TX_FREE_THRESH);
781 /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
783 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
784 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
785 if (tx_conf->tx_rs_thresh)
786 tx_rs_thresh = tx_conf->tx_rs_thresh;
787 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
788 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
789 "exceed nb_desc. (tx_rs_thresh=%u "
790 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
791 (unsigned int)tx_rs_thresh,
792 (unsigned int)tx_free_thresh,
793 (unsigned int)nb_desc,
794 (int)dev->data->port_id,
798 if (tx_rs_thresh >= (nb_desc - 2)) {
799 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
800 "number of TX descriptors minus 2. "
801 "(tx_rs_thresh=%u port=%d queue=%d)",
802 (unsigned int)tx_rs_thresh,
803 (int)dev->data->port_id,
807 if (tx_free_thresh >= (nb_desc - 3)) {
808 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
810 "number of TX descriptors minus 3. "
811 "(tx_free_thresh=%u port=%d queue=%d)",
812 (unsigned int)tx_free_thresh,
813 (int)dev->data->port_id,
817 if (tx_rs_thresh > tx_free_thresh) {
818 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
819 "equal to tx_free_thresh. (tx_free_thresh=%u"
820 " tx_rs_thresh=%u port=%d queue=%d)",
821 (unsigned int)tx_free_thresh,
822 (unsigned int)tx_rs_thresh,
823 (int)dev->data->port_id,
827 if ((nb_desc % tx_rs_thresh) != 0) {
828 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
829 "number of TX descriptors. (tx_rs_thresh=%u"
830 " port=%d queue=%d)",
831 (unsigned int)tx_rs_thresh,
832 (int)dev->data->port_id,
836 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
837 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
838 "tx_rs_thresh is greater than 1. "
839 "(tx_rs_thresh=%u port=%d queue=%d)",
840 (unsigned int)tx_rs_thresh,
841 (int)dev->data->port_id,
846 /* Free memory if needed. */
847 if (dev->data->tx_queues[queue_idx]) {
848 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
849 dev->data->tx_queues[queue_idx] = NULL;
852 /* Allocate the TX queue data structure. */
853 txq = rte_zmalloc_socket(NULL,
854 sizeof(struct ice_tx_queue),
858 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
859 "tx queue structure");
863 /* Allocate TX hardware ring descriptors. */
864 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
865 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
866 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
867 ring_size, ICE_RING_BASE_ALIGN,
870 ice_tx_queue_release(txq);
871 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
875 txq->nb_tx_desc = nb_desc;
876 txq->tx_rs_thresh = tx_rs_thresh;
877 txq->tx_free_thresh = tx_free_thresh;
878 txq->pthresh = tx_conf->tx_thresh.pthresh;
879 txq->hthresh = tx_conf->tx_thresh.hthresh;
880 txq->wthresh = tx_conf->tx_thresh.wthresh;
881 txq->queue_id = queue_idx;
883 txq->reg_idx = vsi->base_queue + queue_idx;
884 txq->port_id = dev->data->port_id;
885 txq->offloads = offloads;
887 txq->tx_deferred_start = tx_conf->tx_deferred_start;
889 txq->tx_ring_dma = tz->iova;
890 txq->tx_ring = tz->addr;
892 /* Allocate software ring */
894 rte_zmalloc_socket(NULL,
895 sizeof(struct ice_tx_entry) * nb_desc,
899 ice_tx_queue_release(txq);
900 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
904 ice_reset_tx_queue(txq);
906 dev->data->tx_queues[queue_idx] = txq;
907 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
908 ice_set_tx_function_flag(dev, txq);
914 ice_tx_queue_release(void *txq)
916 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
919 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
923 ice_tx_queue_release_mbufs(q);
924 rte_free(q->sw_ring);
929 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
930 struct rte_eth_rxq_info *qinfo)
932 struct ice_rx_queue *rxq;
934 rxq = dev->data->rx_queues[queue_id];
937 qinfo->scattered_rx = dev->data->scattered_rx;
938 qinfo->nb_desc = rxq->nb_rx_desc;
940 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
941 qinfo->conf.rx_drop_en = rxq->drop_en;
942 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
946 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
947 struct rte_eth_txq_info *qinfo)
949 struct ice_tx_queue *txq;
951 txq = dev->data->tx_queues[queue_id];
953 qinfo->nb_desc = txq->nb_tx_desc;
955 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
956 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
957 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
959 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
960 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
961 qinfo->conf.offloads = txq->offloads;
962 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
966 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
968 #define ICE_RXQ_SCAN_INTERVAL 4
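/* Sample the DD bit of every 4th descriptor only; the count returned below is therefore a multiple of ICE_RXQ_SCAN_INTERVAL and only approximate. */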
969 volatile union ice_rx_flex_desc *rxdp;
970 struct ice_rx_queue *rxq;
973 rxq = dev->data->rx_queues[rx_queue_id];
974 rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
975 while ((desc < rxq->nb_rx_desc) &&
976 rte_le_to_cpu_16(rxdp->wb.status_error0) &
977 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
979 * Check the DD bit of every 4th Rx descriptor in the group
980 * to avoid checking too frequently and degrading performance
983 desc += ICE_RXQ_SCAN_INTERVAL;
984 rxdp += ICE_RXQ_SCAN_INTERVAL;
985 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
986 rxdp = (volatile union ice_rx_flex_desc *)
987 &(rxq->rx_ring[rxq->rx_tail +
988 desc - rxq->nb_rx_desc]);
994 #define ICE_RX_FLEX_ERR0_BITS \
995 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
996 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
997 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
998 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
999 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1000 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1002 /* Rx L3/L4 checksum */
1003 static inline uint64_t
1004 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1008 /* Check whether the HW has parsed the packet and validated its checksums */
1009 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1012 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1013 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1017 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1018 flags |= PKT_RX_IP_CKSUM_BAD;
1020 flags |= PKT_RX_IP_CKSUM_GOOD;
1022 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1023 flags |= PKT_RX_L4_CKSUM_BAD;
1025 flags |= PKT_RX_L4_CKSUM_GOOD;
1027 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1028 flags |= PKT_RX_EIP_CKSUM_BAD;
1034 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1036 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1037 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1038 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1040 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1041 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1042 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1047 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1048 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1049 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1050 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1051 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1052 mb->vlan_tci_outer = mb->vlan_tci;
1053 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1054 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1055 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1056 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1058 mb->vlan_tci_outer = 0;
1061 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1062 mb->vlan_tci, mb->vlan_tci_outer);
1066 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
1067 volatile union ice_rx_flex_desc *rxdp)
1069 volatile struct ice_32b_rx_flex_desc_comms *desc =
1070 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
1073 stat_err = rte_le_to_cpu_16(desc->status_error0);
1074 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
1075 mb->ol_flags |= PKT_RX_RSS_HASH;
1076 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
1080 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1081 #define ICE_LOOK_AHEAD 8
1082 #if (ICE_LOOK_AHEAD != 8)
1083 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1086 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1088 volatile union ice_rx_flex_desc *rxdp;
1089 struct ice_rx_entry *rxep;
1090 struct rte_mbuf *mb;
1093 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1094 int32_t i, j, nb_rx = 0;
1095 uint64_t pkt_flags = 0;
1096 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1098 rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1099 rxep = &rxq->sw_ring[rxq->rx_tail];
1101 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1103 /* Make sure there is at least 1 packet to receive */
1104 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1108 * Scan LOOK_AHEAD descriptors at a time to determine which
1109 * descriptors reference packets that are ready to be received.
1111 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1112 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1113 /* Read desc statuses backwards to avoid race condition */
1114 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1115 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1119 /* Compute how many status bits were set */
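/* The DD flag is bit 0 of status_error0, so each masked value below contributes either 0 or 1 to nb_dd. */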
1120 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1121 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1125 /* Translate descriptor info to mbuf parameters */
1126 for (j = 0; j < nb_dd; j++) {
1128 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1129 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1130 mb->data_len = pkt_len;
1131 mb->pkt_len = pkt_len;
1133 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1134 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1135 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1136 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1137 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1138 ice_rxd_to_pkt_fields(mb, &rxdp[j]);
1140 mb->ol_flags |= pkt_flags;
1143 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1144 rxq->rx_stage[i + j] = rxep[j].mbuf;
1146 if (nb_dd != ICE_LOOK_AHEAD)
1150 /* Clear software ring entries */
1151 for (i = 0; i < nb_rx; i++)
1152 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1154 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1155 "port_id=%u, queue_id=%u, nb_rx=%d",
1156 rxq->port_id, rxq->queue_id, nb_rx);
1161 static inline uint16_t
1162 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1163 struct rte_mbuf **rx_pkts,
1167 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1169 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1171 for (i = 0; i < nb_pkts; i++)
1172 rx_pkts[i] = stage[i];
1174 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1175 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1181 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1183 volatile union ice_rx_desc *rxdp;
1184 struct ice_rx_entry *rxep;
1185 struct rte_mbuf *mb;
1186 uint16_t alloc_idx, i;
1190 /* Allocate buffers in bulk */
1191 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1192 (rxq->rx_free_thresh - 1));
1193 rxep = &rxq->sw_ring[alloc_idx];
1194 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1195 rxq->rx_free_thresh);
1196 if (unlikely(diag != 0)) {
1197 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1201 rxdp = &rxq->rx_ring[alloc_idx];
1202 for (i = 0; i < rxq->rx_free_thresh; i++) {
1203 if (likely(i < (rxq->rx_free_thresh - 1)))
1204 /* Prefetch next mbuf */
1205 rte_prefetch0(rxep[i + 1].mbuf);
1208 rte_mbuf_refcnt_set(mb, 1);
1210 mb->data_off = RTE_PKTMBUF_HEADROOM;
1212 mb->port = rxq->port_id;
1213 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1214 rxdp[i].read.hdr_addr = 0;
1215 rxdp[i].read.pkt_addr = dma_addr;
1218 /* Update the Rx tail register */
1220 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1222 rxq->rx_free_trigger =
1223 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1224 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1225 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1230 static inline uint16_t
1231 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1233 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1235 struct rte_eth_dev *dev;
1240 if (rxq->rx_nb_avail)
1241 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1243 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1244 rxq->rx_next_avail = 0;
1245 rxq->rx_nb_avail = nb_rx;
1246 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1248 if (rxq->rx_tail > rxq->rx_free_trigger) {
1249 if (ice_rx_alloc_bufs(rxq) != 0) {
1252 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1253 dev->data->rx_mbuf_alloc_failed +=
1254 rxq->rx_free_thresh;
1255 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1256 "port_id=%u, queue_id=%u",
1257 rxq->port_id, rxq->queue_id);
1258 rxq->rx_nb_avail = 0;
1259 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1260 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1261 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1267 if (rxq->rx_tail >= rxq->nb_rx_desc)
1270 if (rxq->rx_nb_avail)
1271 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1277 ice_recv_pkts_bulk_alloc(void *rx_queue,
1278 struct rte_mbuf **rx_pkts,
1285 if (unlikely(nb_pkts == 0))
1288 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1289 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1292 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1293 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1294 nb_rx = (uint16_t)(nb_rx + count);
1295 nb_pkts = (uint16_t)(nb_pkts - count);
1304 ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
1305 struct rte_mbuf __rte_unused **rx_pkts,
1306 uint16_t __rte_unused nb_pkts)
1310 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
1313 ice_recv_scattered_pkts(void *rx_queue,
1314 struct rte_mbuf **rx_pkts,
1317 struct ice_rx_queue *rxq = rx_queue;
1318 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1319 volatile union ice_rx_flex_desc *rxdp;
1320 union ice_rx_flex_desc rxd;
1321 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1322 struct ice_rx_entry *rxe;
1323 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1324 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1325 struct rte_mbuf *nmb; /* newly allocated mbuf */
1326 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1327 uint16_t rx_id = rxq->rx_tail;
1329 uint16_t nb_hold = 0;
1330 uint16_t rx_packet_len;
1331 uint16_t rx_stat_err0;
1334 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1335 struct rte_eth_dev *dev;
1337 while (nb_rx < nb_pkts) {
1338 rxdp = (volatile union ice_rx_flex_desc *)&rx_ring[rx_id];
1339 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1341 /* Check the DD bit first */
1342 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1346 nmb = rte_mbuf_raw_alloc(rxq->mp);
1347 if (unlikely(!nmb)) {
1348 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1349 dev->data->rx_mbuf_alloc_failed++;
1352 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1355 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1357 if (unlikely(rx_id == rxq->nb_rx_desc))
1360 /* Prefetch next mbuf */
1361 rte_prefetch0(sw_ring[rx_id].mbuf);
1364 * When the next RX descriptor is on a cache line boundary,
1365 * prefetch the next 4 RX descriptors and the next 8 pointers
1368 if ((rx_id & 0x3) == 0) {
1369 rte_prefetch0(&rx_ring[rx_id]);
1370 rte_prefetch0(&sw_ring[rx_id]);
1376 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1378 /* Set data buffer address and data length of the mbuf */
1379 rxdp->read.hdr_addr = 0;
1380 rxdp->read.pkt_addr = dma_addr;
1381 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1382 ICE_RX_FLX_DESC_PKT_LEN_M;
1383 rxm->data_len = rx_packet_len;
1384 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1387 * If this is the first buffer of the received packet, set the
1388 * pointer to the first mbuf of the packet and initialize its
1389 * context. Otherwise, update the total length and the number
1390 * of segments of the current scattered packet, and update the
1391 * pointer to the last mbuf of the current packet.
1395 first_seg->nb_segs = 1;
1396 first_seg->pkt_len = rx_packet_len;
1398 first_seg->pkt_len =
1399 (uint16_t)(first_seg->pkt_len +
1401 first_seg->nb_segs++;
1402 last_seg->next = rxm;
1406 * If this is not the last buffer of the received packet,
1407 * update the pointer to the last mbuf of the current scattered
1408 * packet and continue to parse the RX ring.
1410 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1416 * This is the last buffer of the received packet. If the CRC
1417 * is not stripped by the hardware:
1418 * - Subtract the CRC length from the total packet length.
1419 * - If the last buffer only contains the whole CRC or a part
1420 * of it, free the mbuf associated with the last buffer. If part
1421 * of the CRC is also contained in the previous mbuf, subtract
1422 * the length of that CRC part from the data length of the
1426 if (unlikely(rxq->crc_len > 0)) {
1427 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1428 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1429 rte_pktmbuf_free_seg(rxm);
1430 first_seg->nb_segs--;
1431 last_seg->data_len =
1432 (uint16_t)(last_seg->data_len -
1433 (RTE_ETHER_CRC_LEN - rx_packet_len));
1434 last_seg->next = NULL;
1436 rxm->data_len = (uint16_t)(rx_packet_len -
1440 first_seg->port = rxq->port_id;
1441 first_seg->ol_flags = 0;
1442 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1443 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1444 ice_rxd_to_vlan_tci(first_seg, &rxd);
1445 ice_rxd_to_pkt_fields(first_seg, &rxd);
1446 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1447 first_seg->ol_flags |= pkt_flags;
1448 /* Prefetch data of first segment, if configured to do so. */
1449 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1450 first_seg->data_off));
1451 rx_pkts[nb_rx++] = first_seg;
1455 /* Record index of the next RX descriptor to probe. */
1456 rxq->rx_tail = rx_id;
1457 rxq->pkt_first_seg = first_seg;
1458 rxq->pkt_last_seg = last_seg;
1461 * If the number of free RX descriptors is greater than the RX free
1462 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1463 * register. Update the RDT with the value of the last processed RX
1464 * descriptor minus 1, to guarantee that the RDT register is never
1465 * equal to the RDH register, which creates a "full" ring situation
1466 * from the hardware point of view.
1468 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1469 if (nb_hold > rxq->rx_free_thresh) {
1470 rx_id = (uint16_t)(rx_id == 0 ?
1471 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1472 /* write TAIL register */
1473 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1476 rxq->nb_rx_hold = nb_hold;
1478 /* return the received packets in the burst */
1483 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1485 static const uint32_t ptypes[] = {
1486 /* refers to ice_get_default_pkt_type() */
1488 RTE_PTYPE_L2_ETHER_LLDP,
1489 RTE_PTYPE_L2_ETHER_ARP,
1490 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1491 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1494 RTE_PTYPE_L4_NONFRAG,
1498 RTE_PTYPE_TUNNEL_GRENAT,
1499 RTE_PTYPE_TUNNEL_IP,
1500 RTE_PTYPE_INNER_L2_ETHER,
1501 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1502 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1503 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1504 RTE_PTYPE_INNER_L4_FRAG,
1505 RTE_PTYPE_INNER_L4_ICMP,
1506 RTE_PTYPE_INNER_L4_NONFRAG,
1507 RTE_PTYPE_INNER_L4_SCTP,
1508 RTE_PTYPE_INNER_L4_TCP,
1509 RTE_PTYPE_INNER_L4_UDP,
1510 RTE_PTYPE_TUNNEL_GTPC,
1511 RTE_PTYPE_TUNNEL_GTPU,
1515 if (dev->rx_pkt_burst == ice_recv_pkts ||
1516 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1517 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1519 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1523 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1524 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1525 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1526 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1534 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1536 volatile union ice_rx_flex_desc *rxdp;
1537 struct ice_rx_queue *rxq = rx_queue;
1540 if (unlikely(offset >= rxq->nb_rx_desc))
1543 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1544 return RTE_ETH_RX_DESC_UNAVAIL;
1546 desc = rxq->rx_tail + offset;
1547 if (desc >= rxq->nb_rx_desc)
1548 desc -= rxq->nb_rx_desc;
1550 rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[desc];
1551 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1552 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1553 return RTE_ETH_RX_DESC_DONE;
1555 return RTE_ETH_RX_DESC_AVAIL;
1559 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1561 struct ice_tx_queue *txq = tx_queue;
1562 volatile uint64_t *status;
1563 uint64_t mask, expect;
1566 if (unlikely(offset >= txq->nb_tx_desc))
1569 desc = txq->tx_tail + offset;
1570 /* go to next desc that has the RS bit */
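/* i.e. round the index up to the next multiple of tx_rs_thresh; e.g. with tx_rs_thresh = 32, desc 37 rounds up to 64. */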
1571 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1573 if (desc >= txq->nb_tx_desc) {
1574 desc -= txq->nb_tx_desc;
1575 if (desc >= txq->nb_tx_desc)
1576 desc -= txq->nb_tx_desc;
1579 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1580 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1581 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1582 ICE_TXD_QW1_DTYPE_S);
1583 if ((*status & mask) == expect)
1584 return RTE_ETH_TX_DESC_DONE;
1586 return RTE_ETH_TX_DESC_FULL;
1590 ice_clear_queues(struct rte_eth_dev *dev)
1594 PMD_INIT_FUNC_TRACE();
1596 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1597 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
1598 ice_reset_tx_queue(dev->data->tx_queues[i]);
1601 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1602 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
1603 ice_reset_rx_queue(dev->data->rx_queues[i]);
1608 ice_free_queues(struct rte_eth_dev *dev)
1612 PMD_INIT_FUNC_TRACE();
1614 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1615 if (!dev->data->rx_queues[i])
1617 ice_rx_queue_release(dev->data->rx_queues[i]);
1618 dev->data->rx_queues[i] = NULL;
1620 dev->data->nb_rx_queues = 0;
1622 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1623 if (!dev->data->tx_queues[i])
1625 ice_tx_queue_release(dev->data->tx_queues[i]);
1626 dev->data->tx_queues[i] = NULL;
1628 dev->data->nb_tx_queues = 0;
1632 ice_recv_pkts(void *rx_queue,
1633 struct rte_mbuf **rx_pkts,
1636 struct ice_rx_queue *rxq = rx_queue;
1637 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1638 volatile union ice_rx_flex_desc *rxdp;
1639 union ice_rx_flex_desc rxd;
1640 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1641 struct ice_rx_entry *rxe;
1642 struct rte_mbuf *nmb; /* newly allocated mbuf */
1643 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1644 uint16_t rx_id = rxq->rx_tail;
1646 uint16_t nb_hold = 0;
1647 uint16_t rx_packet_len;
1648 uint16_t rx_stat_err0;
1651 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1652 struct rte_eth_dev *dev;
1654 while (nb_rx < nb_pkts) {
1655 rxdp = (volatile union ice_rx_flex_desc *)&rx_ring[rx_id];
1656 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1658 /* Check the DD bit first */
1659 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1663 nmb = rte_mbuf_raw_alloc(rxq->mp);
1664 if (unlikely(!nmb)) {
1665 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1666 dev->data->rx_mbuf_alloc_failed++;
1669 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1672 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1674 if (unlikely(rx_id == rxq->nb_rx_desc))
1679 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1682 * fill the read format of the descriptor with the physical address
1683 * of the newly allocated mbuf: nmb
1685 rxdp->read.hdr_addr = 0;
1686 rxdp->read.pkt_addr = dma_addr;
1688 /* calculate rx_packet_len of the received pkt */
1689 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1690 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1692 /* fill old mbuf with received descriptor: rxd */
1693 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1694 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1697 rxm->pkt_len = rx_packet_len;
1698 rxm->data_len = rx_packet_len;
1699 rxm->port = rxq->port_id;
1700 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1701 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1702 ice_rxd_to_vlan_tci(rxm, &rxd);
1703 ice_rxd_to_pkt_fields(rxm, &rxd);
1704 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1705 rxm->ol_flags |= pkt_flags;
1706 /* copy old mbuf to rx_pkts */
1707 rx_pkts[nb_rx++] = rxm;
1709 rxq->rx_tail = rx_id;
1711 * If the number of free RX descriptors is greater than the RX free
1712 * threshold of the queue, advance the receive tail register of the queue.
1713 * Update that register with the value of the last processed RX
1714 * descriptor minus 1.
1716 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1717 if (nb_hold > rxq->rx_free_thresh) {
1718 rx_id = (uint16_t)(rx_id == 0 ?
1719 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1720 /* write TAIL register */
1721 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1724 rxq->nb_rx_hold = nb_hold;
1726 /* return the received packets in the burst */
1731 ice_parse_tunneling_params(uint64_t ol_flags,
1732 union ice_tx_offload tx_offload,
1733 uint32_t *cd_tunneling)
1735 /* EIPT: External (outer) IP header type */
1736 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
1737 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
1738 else if (ol_flags & PKT_TX_OUTER_IPV4)
1739 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1740 else if (ol_flags & PKT_TX_OUTER_IPV6)
1741 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
1743 /* EIPLEN: External (outer) IP header length, in DWords */
1744 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
1745 ICE_TXD_CTX_QW0_EIPLEN_S;
1747 /* L4TUNT: L4 Tunneling Type */
1748 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
1749 case PKT_TX_TUNNEL_IPIP:
1750 /* for non-UDP/GRE tunneling, set to 00b */
1752 case PKT_TX_TUNNEL_VXLAN:
1753 case PKT_TX_TUNNEL_GENEVE:
1754 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
1756 case PKT_TX_TUNNEL_GRE:
1757 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
1760 PMD_TX_LOG(ERR, "Tunnel type not supported");
1764 /* L4TUNLEN: L4 Tunneling Length, in Words
1766 * We depend on the app to set rte_mbuf.l2_len correctly.
1767 * For IP in GRE it should be set to the length of the GRE
1769 * For MAC in GRE or MAC in UDP it should be set to the length
1770 * of the GRE or UDP headers plus the inner MAC up to and including
1771 * its last Ethertype.
1772 * If MPLS labels exist, they should be included as well.
1774 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
1775 ICE_TXD_CTX_QW0_NATLEN_S;
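/* l2_len is in bytes while the NATLEN field is in 2-byte words, hence the shift right by one. */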
1777 if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
1778 (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
1779 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
1780 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
1784 ice_txd_enable_checksum(uint64_t ol_flags,
1786 uint32_t *td_offset,
1787 union ice_tx_offload tx_offload)
1790 if (ol_flags & PKT_TX_TUNNEL_MASK)
1791 *td_offset |= (tx_offload.outer_l2_len >> 1)
1792 << ICE_TX_DESC_LEN_MACLEN_S;
1794 *td_offset |= (tx_offload.l2_len >> 1)
1795 << ICE_TX_DESC_LEN_MACLEN_S;
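/* The MACLEN field is expressed in 2-byte words, hence the >> 1 conversion from the byte length. */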
1797 /* Enable L3 checksum offloads */
1798 if (ol_flags & PKT_TX_IP_CKSUM) {
1799 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1800 *td_offset |= (tx_offload.l3_len >> 2) <<
1801 ICE_TX_DESC_LEN_IPLEN_S;
1802 } else if (ol_flags & PKT_TX_IPV4) {
1803 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1804 *td_offset |= (tx_offload.l3_len >> 2) <<
1805 ICE_TX_DESC_LEN_IPLEN_S;
1806 } else if (ol_flags & PKT_TX_IPV6) {
1807 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1808 *td_offset |= (tx_offload.l3_len >> 2) <<
1809 ICE_TX_DESC_LEN_IPLEN_S;
1812 if (ol_flags & PKT_TX_TCP_SEG) {
1813 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1814 *td_offset |= (tx_offload.l4_len >> 2) <<
1815 ICE_TX_DESC_LEN_L4_LEN_S;
1819 /* Enable L4 checksum offloads */
1820 switch (ol_flags & PKT_TX_L4_MASK) {
1821 case PKT_TX_TCP_CKSUM:
1822 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1823 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
1824 ICE_TX_DESC_LEN_L4_LEN_S;
1826 case PKT_TX_SCTP_CKSUM:
1827 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1828 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
1829 ICE_TX_DESC_LEN_L4_LEN_S;
1831 case PKT_TX_UDP_CKSUM:
1832 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1833 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
1834 ICE_TX_DESC_LEN_L4_LEN_S;
1842 ice_xmit_cleanup(struct ice_tx_queue *txq)
1844 struct ice_tx_entry *sw_ring = txq->sw_ring;
1845 volatile struct ice_tx_desc *txd = txq->tx_ring;
1846 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1847 uint16_t nb_tx_desc = txq->nb_tx_desc;
1848 uint16_t desc_to_clean_to;
1849 uint16_t nb_tx_to_clean;
1851 /* Determine the last descriptor needing to be cleaned */
1852 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
1853 if (desc_to_clean_to >= nb_tx_desc)
1854 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1856 /* Check to make sure the last descriptor to clean is done */
1857 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1858 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
1859 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
1860 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1861 "(port=%d queue=%d) value=0x%"PRIx64"\n",
1863 txq->port_id, txq->queue_id,
1864 txd[desc_to_clean_to].cmd_type_offset_bsz);
1865 /* Failed to clean any descriptors */
1869 /* Figure out how many descriptors will be cleaned */
1870 if (last_desc_cleaned > desc_to_clean_to)
1871 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1874 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1877 /* The last descriptor to clean is done, so that means all the
1878 * descriptors from the last descriptor that was cleaned
1879 * up to the last descriptor with the RS bit set
1880 * are done. Only reset the threshold descriptor.
1882 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1884 /* Update the txq to reflect the last descriptor that was cleaned */
1885 txq->last_desc_cleaned = desc_to_clean_to;
1886 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
1891 /* Construct the tx flags */
1892 static inline uint64_t
1893 ice_build_ctob(uint32_t td_cmd,
1898 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
1899 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
1900 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
1901 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1902 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
1905 /* Check if the context descriptor is needed for TX offloading */
1906 static inline uint16_t
1907 ice_calc_context_desc(uint64_t flags)
1909 static uint64_t mask = PKT_TX_TCP_SEG |
1911 PKT_TX_OUTER_IP_CKSUM |
1914 return (flags & mask) ? 1 : 0;
1917 /* set ice TSO context descriptor */
1918 static inline uint64_t
1919 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
1921 uint64_t ctx_desc = 0;
1922 uint32_t cd_cmd, hdr_len, cd_tso_len;
1924 if (!tx_offload.l4_len) {
1925 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1929 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
1930 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
1931 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
1933 cd_cmd = ICE_TX_CTX_DESC_TSO;
1934 cd_tso_len = mbuf->pkt_len - hdr_len;
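/* e.g. a 9014-byte TSO packet with 54 bytes of headers leaves cd_tso_len = 8960 bytes of payload for the HW to segment into tso_segsz-sized frames. */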
1935 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
1936 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1937 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
1943 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1945 struct ice_tx_queue *txq;
1946 volatile struct ice_tx_desc *tx_ring;
1947 volatile struct ice_tx_desc *txd;
1948 struct ice_tx_entry *sw_ring;
1949 struct ice_tx_entry *txe, *txn;
1950 struct rte_mbuf *tx_pkt;
1951 struct rte_mbuf *m_seg;
1952 uint32_t cd_tunneling_params;
1957 uint32_t td_cmd = 0;
1958 uint32_t td_offset = 0;
1959 uint32_t td_tag = 0;
1961 uint64_t buf_dma_addr;
1963 union ice_tx_offload tx_offload = {0};
1966 sw_ring = txq->sw_ring;
1967 tx_ring = txq->tx_ring;
1968 tx_id = txq->tx_tail;
1969 txe = &sw_ring[tx_id];
1971 /* Check if the descriptor ring needs to be cleaned. */
1972 if (txq->nb_tx_free < txq->tx_free_thresh)
1973 ice_xmit_cleanup(txq);
1975 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1976 tx_pkt = *tx_pkts++;
1979 ol_flags = tx_pkt->ol_flags;
1980 tx_offload.l2_len = tx_pkt->l2_len;
1981 tx_offload.l3_len = tx_pkt->l3_len;
1982 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1983 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1984 tx_offload.l4_len = tx_pkt->l4_len;
1985 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1986 /* Calculate the number of context descriptors needed. */
1987 nb_ctx = ice_calc_context_desc(ol_flags);
1989 /* The number of descriptors that must be allocated for
1990 * a packet equals the number of segments of that
1991 * packet, plus one context descriptor if needed.
1993 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1994 tx_last = (uint16_t)(tx_id + nb_used - 1);
1997 if (tx_last >= txq->nb_tx_desc)
1998 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2000 if (nb_used > txq->nb_tx_free) {
2001 if (ice_xmit_cleanup(txq) != 0) {
2006 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2007 while (nb_used > txq->nb_tx_free) {
2008 if (ice_xmit_cleanup(txq) != 0) {
2017 /* Descriptor based VLAN insertion */
2018 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2019 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2020 td_tag = tx_pkt->vlan_tci;
2023 /* Fill in tunneling parameters if necessary */
2024 cd_tunneling_params = 0;
2025 if (ol_flags & PKT_TX_TUNNEL_MASK)
2026 ice_parse_tunneling_params(ol_flags, tx_offload,
2027 &cd_tunneling_params);
2029 /* Enable checksum offloading */
2030 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
2031 ice_txd_enable_checksum(ol_flags, &td_cmd,
2032 &td_offset, tx_offload);
2036 /* Setup TX context descriptor if required */
2037 volatile struct ice_tx_ctx_desc *ctx_txd =
2038 (volatile struct ice_tx_ctx_desc *)
2040 uint16_t cd_l2tag2 = 0;
2041 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2043 txn = &sw_ring[txe->next_id];
2044 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2046 rte_pktmbuf_free_seg(txe->mbuf);
2050 if (ol_flags & PKT_TX_TCP_SEG)
2051 cd_type_cmd_tso_mss |=
2052 ice_set_tso_ctx(tx_pkt, tx_offload);
2054 ctx_txd->tunneling_params =
2055 rte_cpu_to_le_32(cd_tunneling_params);
2057 /* TX context descriptor based double VLAN insert */
2058 if (ol_flags & PKT_TX_QINQ) {
2059 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2060 cd_type_cmd_tso_mss |=
2061 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2062 ICE_TXD_CTX_QW1_CMD_S);
2064 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2066 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2068 txe->last_id = tx_last;
2069 tx_id = txe->next_id;
2075 txd = &tx_ring[tx_id];
2076 txn = &sw_ring[txe->next_id];
2079 rte_pktmbuf_free_seg(txe->mbuf);
2082 /* Setup TX Descriptor */
2083 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2084 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2085 txd->cmd_type_offset_bsz =
2086 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2087 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2088 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2089 ((uint64_t)m_seg->data_len <<
2090 ICE_TXD_QW1_TX_BUF_SZ_S) |
2091 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2093 txe->last_id = tx_last;
2094 tx_id = txe->next_id;
2096 m_seg = m_seg->next;
2099 /* fill the last descriptor with End of Packet (EOP) bit */
2100 td_cmd |= ICE_TX_DESC_CMD_EOP;
2101 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2102 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2104 /* set RS bit on the last descriptor of one packet */
2105 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2106 PMD_TX_FREE_LOG(DEBUG,
2107 "Setting RS bit on TXD id="
2108 "%4u (port=%d queue=%d)",
2109 tx_last, txq->port_id, txq->queue_id);
2111 td_cmd |= ICE_TX_DESC_CMD_RS;
2113 /* Update txq RS bit counters */
2114 txq->nb_tx_used = 0;
2116 txd->cmd_type_offset_bsz |=
2117 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2123 /* update Tail register */
2124 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2125 txq->tx_tail = tx_id;
2130 static inline int __attribute__((always_inline))
2131 ice_tx_free_bufs(struct ice_tx_queue *txq)
2133 struct ice_tx_entry *txep;
2136 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2137 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2138 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2141 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
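/* tx_next_dd points at the last descriptor of a completed tx_rs_thresh-sized batch, so the mbufs to free start tx_rs_thresh - 1 entries earlier. */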
2143 for (i = 0; i < txq->tx_rs_thresh; i++)
2144 rte_prefetch0((txep + i)->mbuf);
2146 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2147 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2148 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2152 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2153 rte_pktmbuf_free_seg(txep->mbuf);
2158 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2159 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2160 if (txq->tx_next_dd >= txq->nb_tx_desc)
2161 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2163 return txq->tx_rs_thresh;
2166 /* Populate 4 descriptors with data from 4 mbufs */
2168 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2173 for (i = 0; i < 4; i++, txdp++, pkts++) {
2174 dma_addr = rte_mbuf_data_iova(*pkts);
2175 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2176 txdp->cmd_type_offset_bsz =
2177 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2178 (*pkts)->data_len, 0);
2182 /* Populate 1 descriptor with data from 1 mbuf */
2184 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2188 dma_addr = rte_mbuf_data_iova(*pkts);
2189 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2190 txdp->cmd_type_offset_bsz =
2191 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2192 (*pkts)->data_len, 0);
2196 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2199 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2200 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2201 const int N_PER_LOOP = 4;
2202 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2203 int mainpart, leftover;
2207 * Process most of the packets in chunks of N pkts. Any
2208 * leftover packets will get processed one at a time.
2210 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2211 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2212 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2213 /* Copy N mbuf pointers to the S/W ring */
2214 for (j = 0; j < N_PER_LOOP; ++j)
2215 (txep + i + j)->mbuf = *(pkts + i + j);
2216 tx4(txdp + i, pkts + i);
2219 if (unlikely(leftover > 0)) {
2220 for (i = 0; i < leftover; ++i) {
2221 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2222 tx1(txdp + mainpart + i, pkts + mainpart + i);
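/*
 * Editor's note: illustrative sketch, not part of the driver. With a chunk
 * size of 4, the burst above is split into a multiple-of-4 "mainpart" handled
 * by tx4() and a 0-3 packet "leftover" handled by tx1(); e.g. nb_pkts = 11
 * gives mainpart = 8 and leftover = 3. The helper name is hypothetical.
 */
static inline void
ice_sketch_split_burst(uint16_t nb_pkts, uint16_t *mainpart, uint16_t *leftover)
{
	*mainpart = (uint16_t)(nb_pkts & ~(uint16_t)3); /* round down to multiple of 4 */
	*leftover = (uint16_t)(nb_pkts & (uint16_t)3);  /* remaining 0-3 packets */
}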
2227 static inline uint16_t
2228 tx_xmit_pkts(struct ice_tx_queue *txq,
2229 struct rte_mbuf **tx_pkts,
2232 volatile struct ice_tx_desc *txr = txq->tx_ring;
2236 * Begin scanning the H/W ring for done descriptors when the number
2237 * of available descriptors drops below tx_free_thresh. For each done
2238 * descriptor, free the associated buffer.
2240 if (txq->nb_tx_free < txq->tx_free_thresh)
2241 ice_tx_free_bufs(txq);
2243 /* Use no more descriptors than are currently available */
2244 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2245 if (unlikely(!nb_pkts))
2248 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2249 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2250 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2251 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2252 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2253 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2255 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2259 /* Fill hardware descriptor ring with mbuf data */
2260 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2261 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2263 /* Determine whether the RS bit needs to be set */
2264 if (txq->tx_tail > txq->tx_next_rs) {
2265 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2266 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2269 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2270 if (txq->tx_next_rs >= txq->nb_tx_desc)
2271 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2274 if (txq->tx_tail >= txq->nb_tx_desc)
2277 /* Update the tx tail register */
2279 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
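/*
 * Editor's note: illustrative sketch, not part of the driver. When a burst
 * would run past the end of the descriptor ring, tx_xmit_pkts() above fills
 * the ring in two steps: first up to the ring end, then the remainder from
 * index 0. The hypothetical helper below only computes the first step's size.
 */
static inline uint16_t
ice_sketch_first_chunk(uint16_t tx_tail, uint16_t nb_tx_desc, uint16_t nb_pkts)
{
	if (tx_tail + nb_pkts > nb_tx_desc)
		return (uint16_t)(nb_tx_desc - tx_tail);
	return nb_pkts; /* the whole burst fits without wrapping */
}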
2285 ice_xmit_pkts_simple(void *tx_queue,
2286 struct rte_mbuf **tx_pkts,
2291 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2292 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2296 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2299 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2300 &tx_pkts[nb_tx], num);
2301 nb_tx = (uint16_t)(nb_tx + ret);
2302 nb_pkts = (uint16_t)(nb_pkts - ret);
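/*
 * Editor's note: illustrative sketch, not part of the driver. It renders the
 * loop above as a stand-alone function, assuming tx_xmit_pkts() takes a
 * uint16_t burst size: large bursts are fed to tx_xmit_pkts() in chunks of at
 * most ICE_TX_MAX_BURST packets, and the loop stops early once a chunk is
 * only partially transmitted (ring full).
 */
static inline uint16_t
ice_sketch_chunked_xmit(struct ice_tx_queue *txq, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;

	while (nb_pkts) {
		uint16_t num = (uint16_t)RTE_MIN(nb_pkts,
						 (uint16_t)ICE_TX_MAX_BURST);
		uint16_t ret = tx_xmit_pkts(txq, &tx_pkts[nb_tx], num);

		nb_tx = (uint16_t)(nb_tx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < num) /* ring full, stop early */
			break;
	}
	return nb_tx;
}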
2310 void __attribute__((cold))
2311 ice_set_rx_function(struct rte_eth_dev *dev)
2313 PMD_INIT_FUNC_TRACE();
2314 struct ice_adapter *ad =
2315 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2317 struct ice_rx_queue *rxq;
2319 bool use_avx2 = false;
2321 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2322 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
2323 ad->rx_vec_allowed = true;
2324 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2325 rxq = dev->data->rx_queues[i];
2326 if (rxq && ice_rxq_vec_setup(rxq)) {
2327 ad->rx_vec_allowed = false;
2332 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2333 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2337 ad->rx_vec_allowed = false;
2341 if (ad->rx_vec_allowed) {
2342 if (dev->data->scattered_rx) {
2344 "Using %sVector Scattered Rx (port %d).",
2345 use_avx2 ? "avx2 " : "",
2346 dev->data->port_id);
2347 dev->rx_pkt_burst = use_avx2 ?
2348 ice_recv_scattered_pkts_vec_avx2 :
2349 ice_recv_scattered_pkts_vec;
2351 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2352 use_avx2 ? "avx2 " : "",
2353 dev->data->port_id);
2354 dev->rx_pkt_burst = use_avx2 ?
2355 ice_recv_pkts_vec_avx2 :
2363 if (dev->data->scattered_rx) {
2364 /* Set the non-LRO scattered Rx function */
2366 "Using a Scattered function on port %d.",
2367 dev->data->port_id);
2368 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2369 } else if (ad->rx_bulk_alloc_allowed) {
2371 "Rx Burst Bulk Alloc Preconditions are "
2372 "satisfied. Rx Burst Bulk Alloc function "
2373 "will be used on port %d.",
2374 dev->data->port_id);
2375 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2378 "Rx Burst Bulk Alloc Preconditions are not "
2379 "satisfied, Normal Rx will be used on port %d.",
2380 dev->data->port_id);
2381 dev->rx_pkt_burst = ice_recv_pkts;
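/*
 * Editor's note (summary, not driver code): the Rx burst function above is
 * chosen in this order of preference:
 *   1. vector path (AVX2 variant when the CPU supports it), scattered or not;
 *   2. scattered Rx when dev->data->scattered_rx is set;
 *   3. bulk-allocation Rx when its preconditions are satisfied;
 *   4. the normal Rx path otherwise.
 */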
2385 void __attribute__((cold))
2386 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
2388 struct ice_adapter *ad =
2389 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2391 /* Use a simple Tx queue if possible (only fast free is allowed) */
2392 ad->tx_simple_allowed =
2394 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
2395 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
2397 if (ad->tx_simple_allowed)
2398 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
2402 "Simple Tx can NOT be enabled on Tx queue %u.",
2406 /*********************************************************************
2410 **********************************************************************/
2411 /* Valid TSO MSS range and maximum TSO frame size */
2412 #define ICE_MIN_TSO_MSS 64
2413 #define ICE_MAX_TSO_MSS 9728
2414 #define ICE_MAX_TSO_FRAME_SIZE 262144
2416 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2423 for (i = 0; i < nb_pkts; i++) {
2425 ol_flags = m->ol_flags;
2427 if (ol_flags & PKT_TX_TCP_SEG &&
2428 (m->tso_segsz < ICE_MIN_TSO_MSS ||
2429 m->tso_segsz > ICE_MAX_TSO_MSS ||
2430 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
2432 * An MSS outside this range is considered malicious
2438 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2439 ret = rte_validate_tx_offload(m);
2445 ret = rte_net_intel_cksum_prepare(m);
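/*
 * Editor's note: illustrative sketch, not part of the driver. It restates the
 * TSO sanity check above: a TSO packet whose MSS falls outside
 * [ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS] or whose total length exceeds
 * ICE_MAX_TSO_FRAME_SIZE is rejected before being handed to the hardware.
 * The helper name is hypothetical.
 */
static inline int
ice_sketch_tso_ok(const struct rte_mbuf *m)
{
	if (!(m->ol_flags & PKT_TX_TCP_SEG))
		return 1; /* not a TSO packet, nothing to check */
	return m->tso_segsz >= ICE_MIN_TSO_MSS &&
	       m->tso_segsz <= ICE_MAX_TSO_MSS &&
	       m->pkt_len <= ICE_MAX_TSO_FRAME_SIZE;
}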
2454 void __attribute__((cold))
2455 ice_set_tx_function(struct rte_eth_dev *dev)
2457 struct ice_adapter *ad =
2458 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2460 struct ice_tx_queue *txq;
2462 bool use_avx2 = false;
2464 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2465 if (!ice_tx_vec_dev_check(dev)) {
2466 ad->tx_vec_allowed = true;
2467 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2468 txq = dev->data->tx_queues[i];
2469 if (txq && ice_txq_vec_setup(txq)) {
2470 ad->tx_vec_allowed = false;
2475 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2476 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2480 ad->tx_vec_allowed = false;
2484 if (ad->tx_vec_allowed) {
2485 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2486 use_avx2 ? "avx2 " : "",
2487 dev->data->port_id);
2488 dev->tx_pkt_burst = use_avx2 ?
2489 ice_xmit_pkts_vec_avx2 :
2491 dev->tx_pkt_prepare = NULL;
2497 if (ad->tx_simple_allowed) {
2498 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
2499 dev->tx_pkt_burst = ice_xmit_pkts_simple;
2500 dev->tx_pkt_prepare = NULL;
2502 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
2503 dev->tx_pkt_burst = ice_xmit_pkts;
2504 dev->tx_pkt_prepare = ice_prep_pkts;
2508 /* The hardware datasheet describes in detail what each value means.
2510 * @note: fix ice_dev_supported_ptypes_get() if any change here.
2512 static inline uint32_t
2513 ice_get_default_pkt_type(uint16_t ptype)
2515 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
2516 __rte_cache_aligned = {
2519 [1] = RTE_PTYPE_L2_ETHER,
2520 /* [2] - [5] reserved */
2521 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2522 /* [7] - [10] reserved */
2523 [11] = RTE_PTYPE_L2_ETHER_ARP,
2524 /* [12] - [21] reserved */
2526 /* Non tunneled IPv4 */
2527 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2529 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2530 RTE_PTYPE_L4_NONFRAG,
2531 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2534 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2536 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2538 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2542 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2543 RTE_PTYPE_TUNNEL_IP |
2544 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2545 RTE_PTYPE_INNER_L4_FRAG,
2546 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2547 RTE_PTYPE_TUNNEL_IP |
2548 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2549 RTE_PTYPE_INNER_L4_NONFRAG,
2550 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2551 RTE_PTYPE_TUNNEL_IP |
2552 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2553 RTE_PTYPE_INNER_L4_UDP,
2555 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2556 RTE_PTYPE_TUNNEL_IP |
2557 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2558 RTE_PTYPE_INNER_L4_TCP,
2559 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2560 RTE_PTYPE_TUNNEL_IP |
2561 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2562 RTE_PTYPE_INNER_L4_SCTP,
2563 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2564 RTE_PTYPE_TUNNEL_IP |
2565 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2566 RTE_PTYPE_INNER_L4_ICMP,
2569 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2570 RTE_PTYPE_TUNNEL_IP |
2571 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2572 RTE_PTYPE_INNER_L4_FRAG,
2573 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2574 RTE_PTYPE_TUNNEL_IP |
2575 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2576 RTE_PTYPE_INNER_L4_NONFRAG,
2577 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2578 RTE_PTYPE_TUNNEL_IP |
2579 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2580 RTE_PTYPE_INNER_L4_UDP,
2582 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2583 RTE_PTYPE_TUNNEL_IP |
2584 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2585 RTE_PTYPE_INNER_L4_TCP,
2586 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2587 RTE_PTYPE_TUNNEL_IP |
2588 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2589 RTE_PTYPE_INNER_L4_SCTP,
2590 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2591 RTE_PTYPE_TUNNEL_IP |
2592 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2593 RTE_PTYPE_INNER_L4_ICMP,
2595 /* IPv4 --> GRE/Teredo/VXLAN */
2596 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2597 RTE_PTYPE_TUNNEL_GRENAT,
2599 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2600 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2601 RTE_PTYPE_TUNNEL_GRENAT |
2602 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2603 RTE_PTYPE_INNER_L4_FRAG,
2604 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2605 RTE_PTYPE_TUNNEL_GRENAT |
2606 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2607 RTE_PTYPE_INNER_L4_NONFRAG,
2608 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2609 RTE_PTYPE_TUNNEL_GRENAT |
2610 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2611 RTE_PTYPE_INNER_L4_UDP,
2613 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2614 RTE_PTYPE_TUNNEL_GRENAT |
2615 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2616 RTE_PTYPE_INNER_L4_TCP,
2617 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2618 RTE_PTYPE_TUNNEL_GRENAT |
2619 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2620 RTE_PTYPE_INNER_L4_SCTP,
2621 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2622 RTE_PTYPE_TUNNEL_GRENAT |
2623 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2624 RTE_PTYPE_INNER_L4_ICMP,
2626 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2627 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2628 RTE_PTYPE_TUNNEL_GRENAT |
2629 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2630 RTE_PTYPE_INNER_L4_FRAG,
2631 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2632 RTE_PTYPE_TUNNEL_GRENAT |
2633 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2634 RTE_PTYPE_INNER_L4_NONFRAG,
2635 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2636 RTE_PTYPE_TUNNEL_GRENAT |
2637 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2638 RTE_PTYPE_INNER_L4_UDP,
2640 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2641 RTE_PTYPE_TUNNEL_GRENAT |
2642 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2643 RTE_PTYPE_INNER_L4_TCP,
2644 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2645 RTE_PTYPE_TUNNEL_GRENAT |
2646 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2647 RTE_PTYPE_INNER_L4_SCTP,
2648 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2649 RTE_PTYPE_TUNNEL_GRENAT |
2650 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2651 RTE_PTYPE_INNER_L4_ICMP,
2653 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2654 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2655 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2657 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2658 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2659 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2660 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2661 RTE_PTYPE_INNER_L4_FRAG,
2662 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2663 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2664 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2665 RTE_PTYPE_INNER_L4_NONFRAG,
2666 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2667 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2668 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2669 RTE_PTYPE_INNER_L4_UDP,
2671 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2672 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2673 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2674 RTE_PTYPE_INNER_L4_TCP,
2675 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2676 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2677 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2678 RTE_PTYPE_INNER_L4_SCTP,
2679 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2680 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2681 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2682 RTE_PTYPE_INNER_L4_ICMP,
2684 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2685 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2686 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2687 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2688 RTE_PTYPE_INNER_L4_FRAG,
2689 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2690 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2691 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2692 RTE_PTYPE_INNER_L4_NONFRAG,
2693 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2694 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2695 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2696 RTE_PTYPE_INNER_L4_UDP,
2698 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2699 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2700 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2701 RTE_PTYPE_INNER_L4_TCP,
2702 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2703 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2704 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2705 RTE_PTYPE_INNER_L4_SCTP,
2706 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2707 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2708 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2709 RTE_PTYPE_INNER_L4_ICMP,
2711 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2712 [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2713 RTE_PTYPE_TUNNEL_GRENAT |
2714 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2716 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2717 [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2718 RTE_PTYPE_TUNNEL_GRENAT |
2719 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2720 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2721 RTE_PTYPE_INNER_L4_FRAG,
2722 [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2723 RTE_PTYPE_TUNNEL_GRENAT |
2724 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2725 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2726 RTE_PTYPE_INNER_L4_NONFRAG,
2727 [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2728 RTE_PTYPE_TUNNEL_GRENAT |
2729 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2730 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2731 RTE_PTYPE_INNER_L4_UDP,
2733 [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2734 RTE_PTYPE_TUNNEL_GRENAT |
2735 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2736 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2737 RTE_PTYPE_INNER_L4_TCP,
2738 [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2739 RTE_PTYPE_TUNNEL_GRENAT |
2740 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2741 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2742 RTE_PTYPE_INNER_L4_SCTP,
2743 [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2744 RTE_PTYPE_TUNNEL_GRENAT |
2745 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2746 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2747 RTE_PTYPE_INNER_L4_ICMP,
2749 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2750 [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2751 RTE_PTYPE_TUNNEL_GRENAT |
2752 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2753 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2754 RTE_PTYPE_INNER_L4_FRAG,
2755 [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2756 RTE_PTYPE_TUNNEL_GRENAT |
2757 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2758 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2759 RTE_PTYPE_INNER_L4_NONFRAG,
2760 [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2761 RTE_PTYPE_TUNNEL_GRENAT |
2762 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2763 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2764 RTE_PTYPE_INNER_L4_UDP,
2766 [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2767 RTE_PTYPE_TUNNEL_GRENAT |
2768 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2769 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2770 RTE_PTYPE_INNER_L4_TCP,
2771 [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2772 RTE_PTYPE_TUNNEL_GRENAT |
2773 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2774 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2775 RTE_PTYPE_INNER_L4_SCTP,
2776 [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2777 RTE_PTYPE_TUNNEL_GRENAT |
2778 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2779 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2780 RTE_PTYPE_INNER_L4_ICMP,
2782 /* Non tunneled IPv6 */
2783 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2785 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2786 RTE_PTYPE_L4_NONFRAG,
2787 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2790 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2792 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2794 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2798 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2799 RTE_PTYPE_TUNNEL_IP |
2800 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2801 RTE_PTYPE_INNER_L4_FRAG,
2802 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2803 RTE_PTYPE_TUNNEL_IP |
2804 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2805 RTE_PTYPE_INNER_L4_NONFRAG,
2806 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2807 RTE_PTYPE_TUNNEL_IP |
2808 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2809 RTE_PTYPE_INNER_L4_UDP,
2811 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2812 RTE_PTYPE_TUNNEL_IP |
2813 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2814 RTE_PTYPE_INNER_L4_TCP,
2815 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2816 RTE_PTYPE_TUNNEL_IP |
2817 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2818 RTE_PTYPE_INNER_L4_SCTP,
2819 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2820 RTE_PTYPE_TUNNEL_IP |
2821 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2822 RTE_PTYPE_INNER_L4_ICMP,
2825 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2826 RTE_PTYPE_TUNNEL_IP |
2827 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2828 RTE_PTYPE_INNER_L4_FRAG,
2829 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2830 RTE_PTYPE_TUNNEL_IP |
2831 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2832 RTE_PTYPE_INNER_L4_NONFRAG,
2833 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2834 RTE_PTYPE_TUNNEL_IP |
2835 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2836 RTE_PTYPE_INNER_L4_UDP,
2837 /* [105] reserved */
2838 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2839 RTE_PTYPE_TUNNEL_IP |
2840 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2841 RTE_PTYPE_INNER_L4_TCP,
2842 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2843 RTE_PTYPE_TUNNEL_IP |
2844 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2845 RTE_PTYPE_INNER_L4_SCTP,
2846 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2847 RTE_PTYPE_TUNNEL_IP |
2848 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2849 RTE_PTYPE_INNER_L4_ICMP,
2851 /* IPv6 --> GRE/Teredo/VXLAN */
2852 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2853 RTE_PTYPE_TUNNEL_GRENAT,
2855 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2856 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2857 RTE_PTYPE_TUNNEL_GRENAT |
2858 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2859 RTE_PTYPE_INNER_L4_FRAG,
2860 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2861 RTE_PTYPE_TUNNEL_GRENAT |
2862 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2863 RTE_PTYPE_INNER_L4_NONFRAG,
2864 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2865 RTE_PTYPE_TUNNEL_GRENAT |
2866 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2867 RTE_PTYPE_INNER_L4_UDP,
2868 /* [113] reserved */
2869 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2870 RTE_PTYPE_TUNNEL_GRENAT |
2871 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2872 RTE_PTYPE_INNER_L4_TCP,
2873 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2874 RTE_PTYPE_TUNNEL_GRENAT |
2875 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2876 RTE_PTYPE_INNER_L4_SCTP,
2877 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2878 RTE_PTYPE_TUNNEL_GRENAT |
2879 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2880 RTE_PTYPE_INNER_L4_ICMP,
2882 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2883 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2884 RTE_PTYPE_TUNNEL_GRENAT |
2885 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2886 RTE_PTYPE_INNER_L4_FRAG,
2887 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2888 RTE_PTYPE_TUNNEL_GRENAT |
2889 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2890 RTE_PTYPE_INNER_L4_NONFRAG,
2891 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2892 RTE_PTYPE_TUNNEL_GRENAT |
2893 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2894 RTE_PTYPE_INNER_L4_UDP,
2895 /* [120] reserved */
2896 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2897 RTE_PTYPE_TUNNEL_GRENAT |
2898 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2899 RTE_PTYPE_INNER_L4_TCP,
2900 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2901 RTE_PTYPE_TUNNEL_GRENAT |
2902 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2903 RTE_PTYPE_INNER_L4_SCTP,
2904 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2905 RTE_PTYPE_TUNNEL_GRENAT |
2906 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2907 RTE_PTYPE_INNER_L4_ICMP,
2909 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2910 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2911 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2913 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2914 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2915 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2916 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2917 RTE_PTYPE_INNER_L4_FRAG,
2918 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2919 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2920 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2921 RTE_PTYPE_INNER_L4_NONFRAG,
2922 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2923 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2924 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2925 RTE_PTYPE_INNER_L4_UDP,
2926 /* [128] reserved */
2927 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2928 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2929 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2930 RTE_PTYPE_INNER_L4_TCP,
2931 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2932 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2933 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2934 RTE_PTYPE_INNER_L4_SCTP,
2935 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2936 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2937 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2938 RTE_PTYPE_INNER_L4_ICMP,
2940 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2941 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2942 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2943 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2944 RTE_PTYPE_INNER_L4_FRAG,
2945 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2946 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2947 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2948 RTE_PTYPE_INNER_L4_NONFRAG,
2949 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2950 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2951 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2952 RTE_PTYPE_INNER_L4_UDP,
2953 /* [135] reserved */
2954 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2955 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2956 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2957 RTE_PTYPE_INNER_L4_TCP,
2958 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2959 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2960 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2961 RTE_PTYPE_INNER_L4_SCTP,
2962 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2963 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2964 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2965 RTE_PTYPE_INNER_L4_ICMP,
2967 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2968 [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2969 RTE_PTYPE_TUNNEL_GRENAT |
2970 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2972 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2973 [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2974 RTE_PTYPE_TUNNEL_GRENAT |
2975 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2976 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2977 RTE_PTYPE_INNER_L4_FRAG,
2978 [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2979 RTE_PTYPE_TUNNEL_GRENAT |
2980 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2981 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2982 RTE_PTYPE_INNER_L4_NONFRAG,
2983 [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2984 RTE_PTYPE_TUNNEL_GRENAT |
2985 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2986 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2987 RTE_PTYPE_INNER_L4_UDP,
2988 /* [143] reserved */
2989 [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2990 RTE_PTYPE_TUNNEL_GRENAT |
2991 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2992 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2993 RTE_PTYPE_INNER_L4_TCP,
2994 [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2995 RTE_PTYPE_TUNNEL_GRENAT |
2996 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2997 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2998 RTE_PTYPE_INNER_L4_SCTP,
2999 [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3000 RTE_PTYPE_TUNNEL_GRENAT |
3001 RTE_PTYPE_INNER_L2_ETHER_VLAN |
3002 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3003 RTE_PTYPE_INNER_L4_ICMP,
3005 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
3006 [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3007 RTE_PTYPE_TUNNEL_GRENAT |
3008 RTE_PTYPE_INNER_L2_ETHER_VLAN |
3009 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3010 RTE_PTYPE_INNER_L4_FRAG,
3011 [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3012 RTE_PTYPE_TUNNEL_GRENAT |
3013 RTE_PTYPE_INNER_L2_ETHER_VLAN |
3014 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3015 RTE_PTYPE_INNER_L4_NONFRAG,
3016 [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3017 RTE_PTYPE_TUNNEL_GRENAT |
3018 RTE_PTYPE_INNER_L2_ETHER_VLAN |
3019 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3020 RTE_PTYPE_INNER_L4_UDP,
3021 /* [150] reserved */
3022 [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3023 RTE_PTYPE_TUNNEL_GRENAT |
3024 RTE_PTYPE_INNER_L2_ETHER_VLAN |
3025 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3026 RTE_PTYPE_INNER_L4_TCP,
3027 [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3028 RTE_PTYPE_TUNNEL_GRENAT |
3029 RTE_PTYPE_INNER_L2_ETHER_VLAN |
3030 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3031 RTE_PTYPE_INNER_L4_SCTP,
3032 [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3033 RTE_PTYPE_TUNNEL_GRENAT |
3034 RTE_PTYPE_INNER_L2_ETHER_VLAN |
3035 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3036 RTE_PTYPE_INNER_L4_ICMP,
3037 /* [154] - [255] reserved */
3038 [256] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3039 RTE_PTYPE_TUNNEL_GTPC,
3040 [257] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3041 RTE_PTYPE_TUNNEL_GTPC,
3042 [258] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3043 RTE_PTYPE_TUNNEL_GTPU,
3044 [259] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3045 RTE_PTYPE_TUNNEL_GTPU,
3046 /* [260] - [263] reserved */
3047 [264] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3048 RTE_PTYPE_TUNNEL_GTPC,
3049 [265] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3050 RTE_PTYPE_TUNNEL_GTPC,
3051 [266] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3052 RTE_PTYPE_TUNNEL_GTPU,
3053 [267] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3054 RTE_PTYPE_TUNNEL_GTPU,
3056 /* All others reserved */
3059 return type_table[ptype];
3062 void __attribute__((cold))
3063 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3065 struct ice_adapter *ad =
3066 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3069 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3070 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
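/*
 * Editor's note: illustrative sketch, not part of the driver. The table built
 * above is consumed on the Rx path by indexing it with the hardware ptype
 * taken from the Rx descriptor; reserved entries resolve to 0
 * (RTE_PTYPE_UNKNOWN). The helper name is hypothetical.
 */
static inline uint32_t
ice_sketch_ptype_lookup(const struct ice_adapter *ad, uint16_t hw_ptype)
{
	if (hw_ptype >= ICE_MAX_PKT_TYPE)
		return RTE_PTYPE_UNKNOWN; /* out-of-range index */
	return ad->ptype_tbl[hw_ptype];
}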