1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
10 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
14 PKT_TX_OUTER_IP_CKSUM)
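/* Mask of the Rx descriptor error bits evaluated in
 * ice_rxd_error_to_pkt_flags() (covers the IPE, L4E and EIPE bits,
 * among others).
 */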
16 #define ICE_RX_ERR_BITS 0x3f
18 static enum ice_status
19 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
21 struct ice_vsi *vsi = rxq->vsi;
22 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
23 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
24 struct ice_rlan_ctx rx_ctx;
26 uint16_t buf_size, len;
27 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
31 * The kernel driver uses flex descriptors. It sets the register
32 * to flex descriptor mode.
33 * DPDK uses legacy descriptors, so it should set the register back
34 * to the default value and then use legacy descriptor mode.
36 regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
37 QRXFLXP_CNTXT_RXDID_PRIO_M;
38 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
40 /* Set the buffer size, as header split is disabled. */
41 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
42 RTE_PKTMBUF_HEADROOM);
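/* Round the buffer length up to the hardware granularity of
 * (1 << ICE_RLAN_CTX_DBUF_S) bytes.
 */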
44 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
45 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
46 rxq->max_pkt_len = RTE_MIN(len,
47 dev->data->dev_conf.rxmode.max_rx_pkt_len);
49 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
50 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
51 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
52 PMD_DRV_LOG(ERR, "maximum packet length must "
53 "be larger than %u and smaller than %u,"
54 "as jumbo frame is enabled",
55 (uint32_t)ETHER_MAX_LEN,
56 (uint32_t)ICE_FRAME_SIZE_MAX);
60 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
61 rxq->max_pkt_len > ETHER_MAX_LEN) {
62 PMD_DRV_LOG(ERR, "maximum packet length must be "
63 "larger than %u and smaller than %u, "
64 "as jumbo frame is disabled",
65 (uint32_t)ETHER_MIN_LEN,
66 (uint32_t)ETHER_MAX_LEN);
71 memset(&rx_ctx, 0, sizeof(rx_ctx));
73 rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
74 rx_ctx.qlen = rxq->nb_rx_desc;
75 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
76 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
77 rx_ctx.dtype = 0; /* No Header Split mode */
78 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
79 rx_ctx.dsize = 1; /* 32B descriptors */
81 rx_ctx.rxmax = rxq->max_pkt_len;
82 /* TPH: Transaction Layer Packet (TLP) processing hints */
83 rx_ctx.tphrdesc_ena = 1;
84 rx_ctx.tphwdesc_ena = 1;
85 rx_ctx.tphdata_ena = 1;
86 rx_ctx.tphhead_ena = 1;
87 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
88 * When the number of free descriptors goes below the lrxqthresh,
89 * an immediate interrupt is triggered.
91 rx_ctx.lrxqthresh = 2;
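/* A value of 2 means the interrupt fires when fewer than
 * 2 * 64 = 128 descriptors remain free.
 */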
92 /* Default: use 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
95 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
97 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
99 PMD_DRV_LOG(ERR, "Failed to clear LAN Rx queue (%u) context",
103 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
105 PMD_DRV_LOG(ERR, "Failed to write LAN Rx queue (%u) context",
110 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
111 RTE_PKTMBUF_HEADROOM);
113 /* Check if scattered RX needs to be used. */
114 if (rxq->max_pkt_len > buf_size)
115 dev->data->scattered_rx = 1;
117 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
119 /* Init the Rx tail register */
120 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
125 /* Allocate mbufs for all descriptors in rx queue */
127 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
129 struct ice_rx_entry *rxe = rxq->sw_ring;
133 for (i = 0; i < rxq->nb_rx_desc; i++) {
134 volatile union ice_rx_desc *rxd;
135 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
137 if (unlikely(!mbuf)) {
138 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
142 rte_mbuf_refcnt_set(mbuf, 1);
144 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
146 mbuf->port = rxq->port_id;
149 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
151 rxd = &rxq->rx_ring[i];
152 rxd->read.pkt_addr = dma_addr;
153 rxd->read.hdr_addr = 0;
154 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
164 /* Free all mbufs for descriptors in rx queue */
166 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
170 if (!rxq || !rxq->sw_ring) {
171 PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
175 for (i = 0; i < rxq->nb_rx_desc; i++) {
176 if (rxq->sw_ring[i].mbuf) {
177 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
178 rxq->sw_ring[i].mbuf = NULL;
181 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
182 if (rxq->rx_nb_avail == 0)
184 for (i = 0; i < rxq->rx_nb_avail; i++) {
185 struct rte_mbuf *mbuf;
187 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
188 rte_pktmbuf_free_seg(mbuf);
190 rxq->rx_nb_avail = 0;
191 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
195 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
197 rxq->rx_rel_mbufs(rxq);
200 /* Turn an rx queue on or off
201 * @q_idx: queue index in PF scope
202 * @on: turn the queue on or off
205 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
210 /* QRX_CTRL holds the queue enable (QENA) request/status bits */
211 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
214 if (reg & QRX_CTRL_QENA_STAT_M)
215 return 0; /* Already on, skip */
216 reg |= QRX_CTRL_QENA_REQ_M;
218 if (!(reg & QRX_CTRL_QENA_STAT_M))
219 return 0; /* Already off, skip */
220 reg &= ~QRX_CTRL_QENA_REQ_M;
223 /* Write the register */
224 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
225 /* Check the result. QENA_STAT is expected to follow
226 * QENA_REQ within no more than 10 us.
227 * TODO: need to change the wait counter later
229 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
230 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
231 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
233 if ((reg & QRX_CTRL_QENA_REQ_M) &&
234 (reg & QRX_CTRL_QENA_STAT_M))
237 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
238 !(reg & QRX_CTRL_QENA_STAT_M))
243 /* Check whether the wait timed out */
244 if (j >= ICE_CHK_Q_ENA_COUNT) {
245 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
246 (on ? "enable" : "disable"), q_idx);
254 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
255 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
257 ice_check_rx_burst_bulk_alloc_preconditions
258 (__rte_unused struct ice_rx_queue *rxq)
263 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
264 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
265 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
266 "rxq->rx_free_thresh=%d, "
267 "ICE_RX_MAX_BURST=%d",
268 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
270 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
271 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
272 "rxq->rx_free_thresh=%d, "
273 "rxq->nb_rx_desc=%d",
274 rxq->rx_free_thresh, rxq->nb_rx_desc);
276 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
277 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
278 "rxq->nb_rx_desc=%d, "
279 "rxq->rx_free_thresh=%d",
280 rxq->nb_rx_desc, rxq->rx_free_thresh);
290 /* reset fields in ice_rx_queue back to default */
292 ice_reset_rx_queue(struct ice_rx_queue *rxq)
298 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
302 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
303 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
304 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
306 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
307 len = rxq->nb_rx_desc;
309 for (i = 0; i < len * sizeof(union ice_rx_desc); i++)
310 ((volatile char *)rxq->rx_ring)[i] = 0;
312 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
313 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
314 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
315 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
317 rxq->rx_nb_avail = 0;
318 rxq->rx_next_avail = 0;
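/* Start the trigger at rx_free_thresh - 1 so that the first bulk
 * refill fires once rx_free_thresh descriptors have been consumed.
 */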
319 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
320 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
324 rxq->pkt_first_seg = NULL;
325 rxq->pkt_last_seg = NULL;
327 rxq->rxrearm_start = 0;
332 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
334 struct ice_rx_queue *rxq;
336 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
338 PMD_INIT_FUNC_TRACE();
340 if (rx_queue_id >= dev->data->nb_rx_queues) {
341 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
342 rx_queue_id, dev->data->nb_rx_queues);
346 rxq = dev->data->rx_queues[rx_queue_id];
347 if (!rxq || !rxq->q_set) {
348 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
353 err = ice_program_hw_rx_queue(rxq);
355 PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
360 err = ice_alloc_rx_queue_mbufs(rxq);
362 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
368 /* Init the RX tail register. */
369 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
371 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
373 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
376 ice_rx_queue_release_mbufs(rxq);
377 ice_reset_rx_queue(rxq);
381 dev->data->rx_queue_state[rx_queue_id] =
382 RTE_ETH_QUEUE_STATE_STARTED;
388 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
390 struct ice_rx_queue *rxq;
392 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
394 if (rx_queue_id < dev->data->nb_rx_queues) {
395 rxq = dev->data->rx_queues[rx_queue_id];
397 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
399 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
403 ice_rx_queue_release_mbufs(rxq);
404 ice_reset_rx_queue(rxq);
405 dev->data->rx_queue_state[rx_queue_id] =
406 RTE_ETH_QUEUE_STATE_STOPPED;
413 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
415 struct ice_tx_queue *txq;
419 struct ice_aqc_add_tx_qgrp txq_elem;
420 struct ice_tlan_ctx tx_ctx;
422 PMD_INIT_FUNC_TRACE();
424 if (tx_queue_id >= dev->data->nb_tx_queues) {
425 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
426 tx_queue_id, dev->data->nb_tx_queues);
430 txq = dev->data->tx_queues[tx_queue_id];
431 if (!txq || !txq->q_set) {
432 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
438 hw = ICE_VSI_TO_HW(vsi);
440 memset(&txq_elem, 0, sizeof(txq_elem));
441 memset(&tx_ctx, 0, sizeof(tx_ctx));
442 txq_elem.num_txqs = 1;
443 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
445 tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
446 tx_ctx.qlen = txq->nb_tx_desc;
447 tx_ctx.pf_num = hw->pf_id;
448 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
449 tx_ctx.src_vsi = vsi->vsi_id;
450 tx_ctx.port_num = hw->port_info->lport;
451 tx_ctx.tso_ena = 1; /* tso enable */
452 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
453 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
455 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
458 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
460 /* Init the Tx tail register */
461 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
463 /* FIXME: we assume TC is always 0 here */
464 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
465 &txq_elem, sizeof(txq_elem), NULL);
467 PMD_DRV_LOG(ERR, "Failed to add LAN Tx queue");
470 /* Store the scheduler node id */
471 txq->q_teid = txq_elem.txqs[0].q_teid;
473 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
477 /* Free all mbufs for descriptors in tx queue */
479 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
483 if (!txq || !txq->sw_ring) {
484 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
488 for (i = 0; i < txq->nb_tx_desc; i++) {
489 if (txq->sw_ring[i].mbuf) {
490 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
491 txq->sw_ring[i].mbuf = NULL;
496 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
498 txq->tx_rel_mbufs(txq);
502 ice_reset_tx_queue(struct ice_tx_queue *txq)
504 struct ice_tx_entry *txe;
505 uint16_t i, prev, size;
508 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
513 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
514 for (i = 0; i < size; i++)
515 ((volatile char *)txq->tx_ring)[i] = 0;
517 prev = (uint16_t)(txq->nb_tx_desc - 1);
518 for (i = 0; i < txq->nb_tx_desc; i++) {
519 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
521 txd->cmd_type_offset_bsz =
522 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
525 txe[prev].next_id = i;
529 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
530 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
535 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
536 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
540 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
542 struct ice_tx_queue *txq;
543 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
544 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
545 struct ice_vsi *vsi = pf->main_vsi;
546 enum ice_status status;
549 uint16_t q_handle = tx_queue_id;
551 if (tx_queue_id >= dev->data->nb_tx_queues) {
552 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
553 tx_queue_id, dev->data->nb_tx_queues);
557 txq = dev->data->tx_queues[tx_queue_id];
559 PMD_DRV_LOG(ERR, "TX queue %u is not available",
564 q_ids[0] = txq->reg_idx;
565 q_teids[0] = txq->q_teid;
567 /* FIXME: we assume TC is always 0 here */
568 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
569 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
570 if (status != ICE_SUCCESS) {
571 PMD_DRV_LOG(DEBUG, "Failed to disable LAN Tx queue");
575 ice_tx_queue_release_mbufs(txq);
576 ice_reset_tx_queue(txq);
577 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
583 ice_rx_queue_setup(struct rte_eth_dev *dev,
586 unsigned int socket_id,
587 const struct rte_eth_rxconf *rx_conf,
588 struct rte_mempool *mp)
590 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
591 struct ice_adapter *ad =
592 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
593 struct ice_vsi *vsi = pf->main_vsi;
594 struct ice_rx_queue *rxq;
595 const struct rte_memzone *rz;
598 int use_def_burst_func = 1;
600 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
601 nb_desc > ICE_MAX_RING_DESC ||
602 nb_desc < ICE_MIN_RING_DESC) {
603 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
608 /* Free memory if needed */
609 if (dev->data->rx_queues[queue_idx]) {
610 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
611 dev->data->rx_queues[queue_idx] = NULL;
614 /* Allocate the rx queue data structure */
615 rxq = rte_zmalloc_socket(NULL,
616 sizeof(struct ice_rx_queue),
620 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
621 "rx queue data structure");
625 rxq->nb_rx_desc = nb_desc;
626 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
627 rxq->queue_id = queue_idx;
629 rxq->reg_idx = vsi->base_queue + queue_idx;
630 rxq->port_id = dev->data->port_id;
631 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
632 rxq->crc_len = ETHER_CRC_LEN;
636 rxq->drop_en = rx_conf->rx_drop_en;
638 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
640 /* Allocate the maximum number of RX ring hardware descriptors. */
641 len = ICE_MAX_RING_DESC;
643 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
645 * Allocating a little more memory because the vectorized/bulk_alloc Rx
646 * functions don't check boundaries each time.
648 len += ICE_RX_MAX_BURST;
652 ring_size = sizeof(union ice_rx_desc) * len;
653 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
654 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
655 ring_size, ICE_RING_BASE_ALIGN,
658 ice_rx_queue_release(rxq);
659 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
663 /* Zero all the descriptors in the ring. */
664 memset(rz->addr, 0, ring_size);
666 rxq->rx_ring_phys_addr = rz->phys_addr;
667 rxq->rx_ring = (union ice_rx_desc *)rz->addr;
669 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
670 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
675 /* Allocate the software ring. */
676 rxq->sw_ring = rte_zmalloc_socket(NULL,
677 sizeof(struct ice_rx_entry) * len,
681 ice_rx_queue_release(rxq);
682 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
686 ice_reset_rx_queue(rxq);
688 dev->data->rx_queues[queue_idx] = rxq;
689 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
691 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
693 if (!use_def_burst_func) {
694 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
695 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
696 "satisfied. Rx Burst Bulk Alloc function will be "
697 "used on port=%d, queue=%d.",
698 rxq->port_id, rxq->queue_id);
699 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
701 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
702 "not satisfied, Scattered Rx is requested, "
703 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
704 "not enabled on port=%d, queue=%d.",
705 rxq->port_id, rxq->queue_id);
706 ad->rx_bulk_alloc_allowed = false;
713 ice_rx_queue_release(void *rxq)
715 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
718 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
722 ice_rx_queue_release_mbufs(q);
723 rte_free(q->sw_ring);
728 ice_tx_queue_setup(struct rte_eth_dev *dev,
731 unsigned int socket_id,
732 const struct rte_eth_txconf *tx_conf)
734 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
735 struct ice_vsi *vsi = pf->main_vsi;
736 struct ice_tx_queue *txq;
737 const struct rte_memzone *tz;
739 uint16_t tx_rs_thresh, tx_free_thresh;
742 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
744 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
745 nb_desc > ICE_MAX_RING_DESC ||
746 nb_desc < ICE_MIN_RING_DESC) {
747 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
753 * The following two parameters control the setting of the RS bit on
754 * transmit descriptors. TX descriptors will have their RS bit set
755 * after txq->tx_rs_thresh descriptors have been used. The TX
756 * descriptor ring will be cleaned after txq->tx_free_thresh
757 * descriptors are used or if the number of descriptors required to
758 * transmit a packet is greater than the number of free TX descriptors.
760 * The following constraints must be satisfied:
761 * - tx_rs_thresh must be greater than 0.
762 * - tx_rs_thresh must be less than the size of the ring minus 2.
763 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
764 * - tx_rs_thresh must be a divisor of the ring size.
765 * - tx_free_thresh must be greater than 0.
766 * - tx_free_thresh must be less than the size of the ring minus 3.
768 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
769 * race condition, hence the maximum threshold constraints. When set
770 * to zero use default values.
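*
* For example, with nb_desc = 512 and tx_rs_thresh = tx_free_thresh = 32,
* every constraint holds: 32 > 0, 32 < 510, 32 <= 32, 32 divides 512
* evenly, and 32 < 509.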
772 tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
773 tx_conf->tx_rs_thresh :
774 ICE_DEFAULT_TX_RSBIT_THRESH);
775 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
776 tx_conf->tx_free_thresh :
777 ICE_DEFAULT_TX_FREE_THRESH);
778 if (tx_rs_thresh >= (nb_desc - 2)) {
779 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
780 "number of TX descriptors minus 2. "
781 "(tx_rs_thresh=%u port=%d queue=%d)",
782 (unsigned int)tx_rs_thresh,
783 (int)dev->data->port_id,
787 if (tx_free_thresh >= (nb_desc - 3)) {
788 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
790 "number of TX descriptors minus 3. "
791 "(tx_free_thresh=%u port=%d queue=%d)",
792 (unsigned int)tx_free_thresh,
793 (int)dev->data->port_id,
797 if (tx_rs_thresh > tx_free_thresh) {
798 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
799 "equal to tx_free_thresh. (tx_free_thresh=%u"
800 " tx_rs_thresh=%u port=%d queue=%d)",
801 (unsigned int)tx_free_thresh,
802 (unsigned int)tx_rs_thresh,
803 (int)dev->data->port_id,
807 if ((nb_desc % tx_rs_thresh) != 0) {
808 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
809 "number of TX descriptors. (tx_rs_thresh=%u"
810 " port=%d queue=%d)",
811 (unsigned int)tx_rs_thresh,
812 (int)dev->data->port_id,
816 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
817 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
818 "tx_rs_thresh is greater than 1. "
819 "(tx_rs_thresh=%u port=%d queue=%d)",
820 (unsigned int)tx_rs_thresh,
821 (int)dev->data->port_id,
826 /* Free memory if needed. */
827 if (dev->data->tx_queues[queue_idx]) {
828 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
829 dev->data->tx_queues[queue_idx] = NULL;
832 /* Allocate the TX queue data structure. */
833 txq = rte_zmalloc_socket(NULL,
834 sizeof(struct ice_tx_queue),
838 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
839 "tx queue structure");
843 /* Allocate TX hardware ring descriptors. */
844 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
845 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
846 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
847 ring_size, ICE_RING_BASE_ALIGN,
850 ice_tx_queue_release(txq);
851 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
855 txq->nb_tx_desc = nb_desc;
856 txq->tx_rs_thresh = tx_rs_thresh;
857 txq->tx_free_thresh = tx_free_thresh;
858 txq->pthresh = tx_conf->tx_thresh.pthresh;
859 txq->hthresh = tx_conf->tx_thresh.hthresh;
860 txq->wthresh = tx_conf->tx_thresh.wthresh;
861 txq->queue_id = queue_idx;
863 txq->reg_idx = vsi->base_queue + queue_idx;
864 txq->port_id = dev->data->port_id;
865 txq->offloads = offloads;
867 txq->tx_deferred_start = tx_conf->tx_deferred_start;
869 txq->tx_ring_phys_addr = tz->phys_addr;
870 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
872 /* Allocate software ring */
874 rte_zmalloc_socket(NULL,
875 sizeof(struct ice_tx_entry) * nb_desc,
879 ice_tx_queue_release(txq);
880 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
884 ice_reset_tx_queue(txq);
886 dev->data->tx_queues[queue_idx] = txq;
887 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
888 ice_set_tx_function_flag(dev, txq);
894 ice_tx_queue_release(void *txq)
896 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
899 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
903 ice_tx_queue_release_mbufs(q);
904 rte_free(q->sw_ring);
909 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
910 struct rte_eth_rxq_info *qinfo)
912 struct ice_rx_queue *rxq;
914 rxq = dev->data->rx_queues[queue_id];
917 qinfo->scattered_rx = dev->data->scattered_rx;
918 qinfo->nb_desc = rxq->nb_rx_desc;
920 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
921 qinfo->conf.rx_drop_en = rxq->drop_en;
922 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
926 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
927 struct rte_eth_txq_info *qinfo)
929 struct ice_tx_queue *txq;
931 txq = dev->data->tx_queues[queue_id];
933 qinfo->nb_desc = txq->nb_tx_desc;
935 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
936 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
937 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
939 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
940 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
941 qinfo->conf.offloads = txq->offloads;
942 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
946 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
948 #define ICE_RXQ_SCAN_INTERVAL 4
949 volatile union ice_rx_desc *rxdp;
950 struct ice_rx_queue *rxq;
953 rxq = dev->data->rx_queues[rx_queue_id];
954 rxdp = &rxq->rx_ring[rxq->rx_tail];
955 while ((desc < rxq->nb_rx_desc) &&
956 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
957 ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
958 (1 << ICE_RX_DESC_STATUS_DD_S)) {
960 * Check the DD bit of every 4th rx descriptor to avoid checking
961 * too frequently and degrading performance
964 desc += ICE_RXQ_SCAN_INTERVAL;
965 rxdp += ICE_RXQ_SCAN_INTERVAL;
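/* Wrap the descriptor pointer back to the start of the ring */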
966 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
967 rxdp = &(rxq->rx_ring[rxq->rx_tail +
968 desc - rxq->nb_rx_desc]);
974 /* Translate the rx descriptor status to pkt flags */
975 static inline uint64_t
976 ice_rxd_status_to_pkt_flags(uint64_t qword)
980 /* Check if RSS_HASH */
981 flags = (((qword >> ICE_RX_DESC_STATUS_FLTSTAT_S) &
982 ICE_RX_DESC_FLTSTAT_RSS_HASH) ==
983 ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
988 /* Rx L3/L4 checksum */
989 static inline uint64_t
990 ice_rxd_error_to_pkt_flags(uint64_t qword)
993 uint64_t error_bits = (qword >> ICE_RXD_QW1_ERROR_S);
995 if (likely((error_bits & ICE_RX_ERR_BITS) == 0)) {
996 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1000 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_IPE_S)))
1001 flags |= PKT_RX_IP_CKSUM_BAD;
1003 flags |= PKT_RX_IP_CKSUM_GOOD;
1005 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_L4E_S)))
1006 flags |= PKT_RX_L4_CKSUM_BAD;
1008 flags |= PKT_RX_L4_CKSUM_GOOD;
1010 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_EIPE_S)))
1011 flags |= PKT_RX_EIP_CKSUM_BAD;
1017 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
1019 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1020 (1 << ICE_RX_DESC_STATUS_L2TAG1P_S)) {
1021 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1023 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1024 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1025 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
1030 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1031 if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
1032 (1 << ICE_RX_DESC_EXT_STATUS_L2TAG2P_S)) {
1033 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1034 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1035 mb->vlan_tci_outer = mb->vlan_tci;
1036 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
1037 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1038 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
1039 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
1041 mb->vlan_tci_outer = 0;
1044 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1045 mb->vlan_tci, mb->vlan_tci_outer);
1048 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1049 #define ICE_LOOK_AHEAD 8
1050 #if (ICE_LOOK_AHEAD != 8)
1051 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
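/* The scan loop below advances rxdp/rxep in fixed groups of
 * ICE_LOOK_AHEAD and iterates ICE_RX_MAX_BURST / ICE_LOOK_AHEAD
 * times, so the value is pinned to 8.
 */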
1054 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1056 volatile union ice_rx_desc *rxdp;
1057 struct ice_rx_entry *rxep;
1058 struct rte_mbuf *mb;
1062 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1063 int32_t i, j, nb_rx = 0;
1064 uint64_t pkt_flags = 0;
1065 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1067 rxdp = &rxq->rx_ring[rxq->rx_tail];
1068 rxep = &rxq->sw_ring[rxq->rx_tail];
1070 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1071 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S;
1073 /* Make sure there is at least 1 packet to receive */
1074 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1078 * Scan LOOK_AHEAD descriptors at a time to determine which
1079 * descriptors reference packets that are ready to be received.
1081 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1082 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1083 /* Read desc statuses backwards to avoid race condition */
1084 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) {
1085 qword1 = rte_le_to_cpu_64(
1086 rxdp[j].wb.qword1.status_error_len);
1087 s[j] = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1088 ICE_RXD_QW1_STATUS_S;
1093 /* Compute how many status bits were set */
1094 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1095 nb_dd += s[j] & (1 << ICE_RX_DESC_STATUS_DD_S);
1099 /* Translate descriptor info to mbuf parameters */
1100 for (j = 0; j < nb_dd; j++) {
1102 qword1 = rte_le_to_cpu_64(
1103 rxdp[j].wb.qword1.status_error_len);
1104 pkt_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1105 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1106 mb->data_len = pkt_len;
1107 mb->pkt_len = pkt_len;
1109 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1110 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1111 if (pkt_flags & PKT_RX_RSS_HASH)
1114 rxdp[j].wb.qword0.hi_dword.rss);
1115 mb->packet_type = ptype_tbl[(uint8_t)(
1117 ICE_RXD_QW1_PTYPE_M) >>
1118 ICE_RXD_QW1_PTYPE_S)];
1119 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1121 mb->ol_flags |= pkt_flags;
1124 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1125 rxq->rx_stage[i + j] = rxep[j].mbuf;
1127 if (nb_dd != ICE_LOOK_AHEAD)
1131 /* Clear software ring entries */
1132 for (i = 0; i < nb_rx; i++)
1133 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1135 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1136 "port_id=%u, queue_id=%u, nb_rx=%d",
1137 rxq->port_id, rxq->queue_id, nb_rx);
1142 static inline uint16_t
1143 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1144 struct rte_mbuf **rx_pkts,
1148 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1150 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1152 for (i = 0; i < nb_pkts; i++)
1153 rx_pkts[i] = stage[i];
1155 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1156 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1162 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1164 volatile union ice_rx_desc *rxdp;
1165 struct ice_rx_entry *rxep;
1166 struct rte_mbuf *mb;
1167 uint16_t alloc_idx, i;
1171 /* Allocate buffers in bulk */
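/* alloc_idx is the first ring slot of the rx_free_thresh-sized
 * window that ends at rx_free_trigger.
 */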
1172 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1173 (rxq->rx_free_thresh - 1));
1174 rxep = &rxq->sw_ring[alloc_idx];
1175 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1176 rxq->rx_free_thresh);
1177 if (unlikely(diag != 0)) {
1178 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1182 rxdp = &rxq->rx_ring[alloc_idx];
1183 for (i = 0; i < rxq->rx_free_thresh; i++) {
1184 if (likely(i < (rxq->rx_free_thresh - 1)))
1185 /* Prefetch next mbuf */
1186 rte_prefetch0(rxep[i + 1].mbuf);
1189 rte_mbuf_refcnt_set(mb, 1);
1191 mb->data_off = RTE_PKTMBUF_HEADROOM;
1193 mb->port = rxq->port_id;
1194 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1195 rxdp[i].read.hdr_addr = 0;
1196 rxdp[i].read.pkt_addr = dma_addr;
1199 /* Update the rx tail register */
1201 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1203 rxq->rx_free_trigger =
1204 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1205 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1206 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1211 static inline uint16_t
1212 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1214 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1216 struct rte_eth_dev *dev;
1221 if (rxq->rx_nb_avail)
1222 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1224 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1225 rxq->rx_next_avail = 0;
1226 rxq->rx_nb_avail = nb_rx;
1227 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1229 if (rxq->rx_tail > rxq->rx_free_trigger) {
1230 if (ice_rx_alloc_bufs(rxq) != 0) {
1233 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1234 dev->data->rx_mbuf_alloc_failed +=
1235 rxq->rx_free_thresh;
1236 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1237 "port_id=%u, queue_id=%u",
1238 rxq->port_id, rxq->queue_id);
1239 rxq->rx_nb_avail = 0;
1240 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1241 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1242 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1248 if (rxq->rx_tail >= rxq->nb_rx_desc)
1251 if (rxq->rx_nb_avail)
1252 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1258 ice_recv_pkts_bulk_alloc(void *rx_queue,
1259 struct rte_mbuf **rx_pkts,
1266 if (unlikely(nb_pkts == 0))
1269 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1270 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1273 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1274 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1275 nb_rx = (uint16_t)(nb_rx + count);
1276 nb_pkts = (uint16_t)(nb_pkts - count);
1285 ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
1286 struct rte_mbuf __rte_unused **rx_pkts,
1287 uint16_t __rte_unused nb_pkts)
1291 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
1294 ice_recv_scattered_pkts(void *rx_queue,
1295 struct rte_mbuf **rx_pkts,
1298 struct ice_rx_queue *rxq = rx_queue;
1299 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1300 volatile union ice_rx_desc *rxdp;
1301 union ice_rx_desc rxd;
1302 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1303 struct ice_rx_entry *rxe;
1304 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1305 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1306 struct rte_mbuf *nmb; /* new allocated mbuf */
1307 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1308 uint16_t rx_id = rxq->rx_tail;
1310 uint16_t nb_hold = 0;
1311 uint16_t rx_packet_len;
1315 uint64_t pkt_flags = 0;
1316 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1317 struct rte_eth_dev *dev;
1319 while (nb_rx < nb_pkts) {
1320 rxdp = &rx_ring[rx_id];
1321 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1322 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1323 ICE_RXD_QW1_STATUS_S;
1325 /* Check the DD bit first */
1326 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1330 nmb = rte_mbuf_raw_alloc(rxq->mp);
1331 if (unlikely(!nmb)) {
1332 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1333 dev->data->rx_mbuf_alloc_failed++;
1336 rxd = *rxdp; /* copy the ring descriptor to a temp variable */
1339 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1341 if (unlikely(rx_id == rxq->nb_rx_desc))
1344 /* Prefetch next mbuf */
1345 rte_prefetch0(sw_ring[rx_id].mbuf);
1348 * When the next RX descriptor is on a cache line boundary,
1349 * prefetch the next 4 RX descriptors and the next 8 pointers
1352 if ((rx_id & 0x3) == 0) {
1353 rte_prefetch0(&rx_ring[rx_id]);
1354 rte_prefetch0(&sw_ring[rx_id]);
1360 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1362 /* Set data buffer address and data length of the mbuf */
1363 rxdp->read.hdr_addr = 0;
1364 rxdp->read.pkt_addr = dma_addr;
1365 rx_packet_len = (qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1366 ICE_RXD_QW1_LEN_PBUF_S;
1367 rxm->data_len = rx_packet_len;
1368 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1369 ice_rxd_to_vlan_tci(rxm, rxdp);
1370 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1371 ICE_RXD_QW1_PTYPE_M) >>
1372 ICE_RXD_QW1_PTYPE_S)];
1375 * If this is the first buffer of the received packet, set the
1376 * pointer to the first mbuf of the packet and initialize its
1377 * context. Otherwise, update the total length and the number
1378 * of segments of the current scattered packet, and update the
1379 * pointer to the last mbuf of the current packet.
1383 first_seg->nb_segs = 1;
1384 first_seg->pkt_len = rx_packet_len;
1386 first_seg->pkt_len =
1387 (uint16_t)(first_seg->pkt_len +
1389 first_seg->nb_segs++;
1390 last_seg->next = rxm;
1394 * If this is not the last buffer of the received packet,
1395 * update the pointer to the last mbuf of the current scattered
1396 * packet and continue to parse the RX ring.
1398 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_EOF_S))) {
1404 * This is the last buffer of the received packet. If the CRC
1405 * is not stripped by the hardware:
1406 * - Subtract the CRC length from the total packet length.
1407 * - If the last buffer only contains the whole CRC or a part
1408 * of it, free the mbuf associated to the last buffer. If part
1409 * of the CRC is also contained in the previous mbuf, subtract
1410 * the length of that CRC part from the data length of the
1414 if (unlikely(rxq->crc_len > 0)) {
1415 first_seg->pkt_len -= ETHER_CRC_LEN;
1416 if (rx_packet_len <= ETHER_CRC_LEN) {
1417 rte_pktmbuf_free_seg(rxm);
1418 first_seg->nb_segs--;
1419 last_seg->data_len =
1420 (uint16_t)(last_seg->data_len -
1421 (ETHER_CRC_LEN - rx_packet_len));
1422 last_seg->next = NULL;
1424 rxm->data_len = (uint16_t)(rx_packet_len -
1428 first_seg->port = rxq->port_id;
1429 first_seg->ol_flags = 0;
1431 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1432 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1433 if (pkt_flags & PKT_RX_RSS_HASH)
1434 first_seg->hash.rss =
1435 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1437 first_seg->ol_flags |= pkt_flags;
1438 /* Prefetch data of first segment, if configured to do so. */
1439 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1440 first_seg->data_off));
1441 rx_pkts[nb_rx++] = first_seg;
1445 /* Record index of the next RX descriptor to probe. */
1446 rxq->rx_tail = rx_id;
1447 rxq->pkt_first_seg = first_seg;
1448 rxq->pkt_last_seg = last_seg;
1451 * If the number of free RX descriptors is greater than the RX free
1452 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1453 * register. Update the RDT with the value of the last processed RX
1454 * descriptor minus 1, to guarantee that the RDT register is never
1455 * equal to the RDH register, which creates a "full" ring situation
1456 * from the hardware point of view.
1458 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1459 if (nb_hold > rxq->rx_free_thresh) {
1460 rx_id = (uint16_t)(rx_id == 0 ?
1461 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1462 /* write TAIL register */
1463 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1466 rxq->nb_rx_hold = nb_hold;
1468 /* return the number of packets received in the burst */
1473 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1475 static const uint32_t ptypes[] = {
1476 /* refers to ice_get_default_pkt_type() */
1478 RTE_PTYPE_L2_ETHER_LLDP,
1479 RTE_PTYPE_L2_ETHER_ARP,
1480 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1481 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1484 RTE_PTYPE_L4_NONFRAG,
1488 RTE_PTYPE_TUNNEL_GRENAT,
1489 RTE_PTYPE_TUNNEL_IP,
1490 RTE_PTYPE_INNER_L2_ETHER,
1491 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1492 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1493 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1494 RTE_PTYPE_INNER_L4_FRAG,
1495 RTE_PTYPE_INNER_L4_ICMP,
1496 RTE_PTYPE_INNER_L4_NONFRAG,
1497 RTE_PTYPE_INNER_L4_SCTP,
1498 RTE_PTYPE_INNER_L4_TCP,
1499 RTE_PTYPE_INNER_L4_UDP,
1500 RTE_PTYPE_TUNNEL_GTPC,
1501 RTE_PTYPE_TUNNEL_GTPU,
1505 if (dev->rx_pkt_burst == ice_recv_pkts ||
1506 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1507 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1509 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1513 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1514 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1515 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1516 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1524 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1526 struct ice_rx_queue *rxq = rx_queue;
1527 volatile uint64_t *status;
1531 if (unlikely(offset >= rxq->nb_rx_desc))
1534 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1535 return RTE_ETH_RX_DESC_UNAVAIL;
1537 desc = rxq->rx_tail + offset;
1538 if (desc >= rxq->nb_rx_desc)
1539 desc -= rxq->nb_rx_desc;
1541 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
1542 mask = rte_cpu_to_le_64((1ULL << ICE_RX_DESC_STATUS_DD_S) <<
1543 ICE_RXD_QW1_STATUS_S);
1545 return RTE_ETH_RX_DESC_DONE;
1547 return RTE_ETH_RX_DESC_AVAIL;
1551 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1553 struct ice_tx_queue *txq = tx_queue;
1554 volatile uint64_t *status;
1555 uint64_t mask, expect;
1558 if (unlikely(offset >= txq->nb_tx_desc))
1561 desc = txq->tx_tail + offset;
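/* tx_tail + offset can approach twice the ring size, and rounding up
 * to the RS-bit granularity can push it past it, hence the double
 * wrap-around check below.
 */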
1562 /* go to the next descriptor that has the RS bit set */
1563 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1565 if (desc >= txq->nb_tx_desc) {
1566 desc -= txq->nb_tx_desc;
1567 if (desc >= txq->nb_tx_desc)
1568 desc -= txq->nb_tx_desc;
1571 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1572 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1573 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1574 ICE_TXD_QW1_DTYPE_S);
1575 if ((*status & mask) == expect)
1576 return RTE_ETH_TX_DESC_DONE;
1578 return RTE_ETH_TX_DESC_FULL;
1582 ice_clear_queues(struct rte_eth_dev *dev)
1586 PMD_INIT_FUNC_TRACE();
1588 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1589 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
1590 ice_reset_tx_queue(dev->data->tx_queues[i]);
1593 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1594 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
1595 ice_reset_rx_queue(dev->data->rx_queues[i]);
1600 ice_free_queues(struct rte_eth_dev *dev)
1604 PMD_INIT_FUNC_TRACE();
1606 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1607 if (!dev->data->rx_queues[i])
1609 ice_rx_queue_release(dev->data->rx_queues[i]);
1610 dev->data->rx_queues[i] = NULL;
1612 dev->data->nb_rx_queues = 0;
1614 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1615 if (!dev->data->tx_queues[i])
1617 ice_tx_queue_release(dev->data->tx_queues[i]);
1618 dev->data->tx_queues[i] = NULL;
1620 dev->data->nb_tx_queues = 0;
1624 ice_recv_pkts(void *rx_queue,
1625 struct rte_mbuf **rx_pkts,
1628 struct ice_rx_queue *rxq = rx_queue;
1629 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1630 volatile union ice_rx_desc *rxdp;
1631 union ice_rx_desc rxd;
1632 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1633 struct ice_rx_entry *rxe;
1634 struct rte_mbuf *nmb; /* new allocated mbuf */
1635 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1636 uint16_t rx_id = rxq->rx_tail;
1638 uint16_t nb_hold = 0;
1639 uint16_t rx_packet_len;
1643 uint64_t pkt_flags = 0;
1644 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1645 struct rte_eth_dev *dev;
1647 while (nb_rx < nb_pkts) {
1648 rxdp = &rx_ring[rx_id];
1649 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1650 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1651 ICE_RXD_QW1_STATUS_S;
1653 /* Check the DD bit first */
1654 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1658 nmb = rte_mbuf_raw_alloc(rxq->mp);
1659 if (unlikely(!nmb)) {
1660 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1661 dev->data->rx_mbuf_alloc_failed++;
1664 rxd = *rxdp; /* copy the ring descriptor to a temp variable */
1667 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1669 if (unlikely(rx_id == rxq->nb_rx_desc))
1674 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1677 * fill the read format of the descriptor with the physical
1678 * address of the newly allocated mbuf: nmb
1680 rxdp->read.hdr_addr = 0;
1681 rxdp->read.pkt_addr = dma_addr;
1683 /* calculate rx_packet_len of the received pkt */
1684 rx_packet_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1685 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1687 /* fill old mbuf with received descriptor: rxd */
1688 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1689 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1692 rxm->pkt_len = rx_packet_len;
1693 rxm->data_len = rx_packet_len;
1694 rxm->port = rxq->port_id;
1695 ice_rxd_to_vlan_tci(rxm, rxdp);
1696 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1697 ICE_RXD_QW1_PTYPE_M) >>
1698 ICE_RXD_QW1_PTYPE_S)];
1699 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1700 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1701 if (pkt_flags & PKT_RX_RSS_HASH)
1703 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1704 rxm->ol_flags |= pkt_flags;
1705 /* copy old mbuf to rx_pkts */
1706 rx_pkts[nb_rx++] = rxm;
1708 rxq->rx_tail = rx_id;
1710 * If the number of free RX descriptors is greater than the RX free
1711 * threshold of the queue, advance the queue's receive tail register.
1712 * Update that register with the value of the last processed RX
1713 * descriptor minus 1.
1715 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1716 if (nb_hold > rxq->rx_free_thresh) {
1717 rx_id = (uint16_t)(rx_id == 0 ?
1718 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1719 /* write TAIL register */
1720 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1723 rxq->nb_rx_hold = nb_hold;
1725 /* return the number of packets received in the burst */
1730 ice_txd_enable_checksum(uint64_t ol_flags,
1732 uint32_t *td_offset,
1733 union ice_tx_offload tx_offload)
1735 /* L2 length must be set. */
1736 *td_offset |= (tx_offload.l2_len >> 1) <<
1737 ICE_TX_DESC_LEN_MACLEN_S;
1739 /* Enable L3 checksum offloads */
1740 if (ol_flags & PKT_TX_IP_CKSUM) {
1741 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1742 *td_offset |= (tx_offload.l3_len >> 2) <<
1743 ICE_TX_DESC_LEN_IPLEN_S;
1744 } else if (ol_flags & PKT_TX_IPV4) {
1745 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1746 *td_offset |= (tx_offload.l3_len >> 2) <<
1747 ICE_TX_DESC_LEN_IPLEN_S;
1748 } else if (ol_flags & PKT_TX_IPV6) {
1749 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1750 *td_offset |= (tx_offload.l3_len >> 2) <<
1751 ICE_TX_DESC_LEN_IPLEN_S;
1754 if (ol_flags & PKT_TX_TCP_SEG) {
1755 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1756 *td_offset |= (tx_offload.l4_len >> 2) <<
1757 ICE_TX_DESC_LEN_L4_LEN_S;
1761 /* Enable L4 checksum offloads */
1762 switch (ol_flags & PKT_TX_L4_MASK) {
1763 case PKT_TX_TCP_CKSUM:
1764 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1765 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
1766 ICE_TX_DESC_LEN_L4_LEN_S;
1768 case PKT_TX_SCTP_CKSUM:
1769 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1770 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
1771 ICE_TX_DESC_LEN_L4_LEN_S;
1773 case PKT_TX_UDP_CKSUM:
1774 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1775 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
1776 ICE_TX_DESC_LEN_L4_LEN_S;
1784 ice_xmit_cleanup(struct ice_tx_queue *txq)
1786 struct ice_tx_entry *sw_ring = txq->sw_ring;
1787 volatile struct ice_tx_desc *txd = txq->tx_ring;
1788 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1789 uint16_t nb_tx_desc = txq->nb_tx_desc;
1790 uint16_t desc_to_clean_to;
1791 uint16_t nb_tx_to_clean;
1793 /* Determine the last descriptor needing to be cleaned */
1794 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
1795 if (desc_to_clean_to >= nb_tx_desc)
1796 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1798 /* Check to make sure the last descriptor to clean is done */
1799 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1800 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
1801 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
1802 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1803 "(port=%d queue=%d) value=0x%"PRIx64"\n",
1805 txq->port_id, txq->queue_id,
1806 txd[desc_to_clean_to].cmd_type_offset_bsz);
1807 /* Failed to clean any descriptors */
1811 /* Figure out how many descriptors will be cleaned */
1812 if (last_desc_cleaned > desc_to_clean_to)
1813 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1816 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1819 /* The last descriptor to clean is done, so that means all the
1820 * descriptors from the last descriptor that was cleaned
1821 * up to the last descriptor with the RS bit set
1822 * are done. Only reset the threshold descriptor.
1824 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1826 /* Update the txq to reflect the last descriptor that was cleaned */
1827 txq->last_desc_cleaned = desc_to_clean_to;
1828 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
1833 /* Construct the tx flags */
1834 static inline uint64_t
1835 ice_build_ctob(uint32_t td_cmd,
1840 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
1841 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
1842 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
1843 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1844 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
1847 /* Check if the context descriptor is needed for TX offloading */
1848 static inline uint16_t
1849 ice_calc_context_desc(uint64_t flags)
1851 static uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_QINQ;
1853 return (flags & mask) ? 1 : 0;
1856 /* set ice TSO context descriptor */
1857 static inline uint64_t
1858 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
1860 uint64_t ctx_desc = 0;
1861 uint32_t cd_cmd, hdr_len, cd_tso_len;
1863 if (!tx_offload.l4_len) {
1864 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1869 * In the case of a non-tunneling packet, outer_l2_len and
1870 * outer_l3_len must be 0.
1872 hdr_len = tx_offload.outer_l2_len +
1873 tx_offload.outer_l3_len +
1878 cd_cmd = ICE_TX_CTX_DESC_TSO;
1879 cd_tso_len = mbuf->pkt_len - hdr_len;
1880 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
1881 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1882 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
1888 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1890 struct ice_tx_queue *txq;
1891 volatile struct ice_tx_desc *tx_ring;
1892 volatile struct ice_tx_desc *txd;
1893 struct ice_tx_entry *sw_ring;
1894 struct ice_tx_entry *txe, *txn;
1895 struct rte_mbuf *tx_pkt;
1896 struct rte_mbuf *m_seg;
1901 uint32_t td_cmd = 0;
1902 uint32_t td_offset = 0;
1903 uint32_t td_tag = 0;
1905 uint64_t buf_dma_addr;
1907 union ice_tx_offload tx_offload = {0};
1910 sw_ring = txq->sw_ring;
1911 tx_ring = txq->tx_ring;
1912 tx_id = txq->tx_tail;
1913 txe = &sw_ring[tx_id];
1915 /* Check if the descriptor ring needs to be cleaned. */
1916 if (txq->nb_tx_free < txq->tx_free_thresh)
1917 ice_xmit_cleanup(txq);
1919 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1920 tx_pkt = *tx_pkts++;
1923 ol_flags = tx_pkt->ol_flags;
1924 tx_offload.l2_len = tx_pkt->l2_len;
1925 tx_offload.l3_len = tx_pkt->l3_len;
1926 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1927 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1928 tx_offload.l4_len = tx_pkt->l4_len;
1929 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1930 /* Calculate the number of context descriptors needed. */
1931 nb_ctx = ice_calc_context_desc(ol_flags);
1933 /* The number of descriptors that must be allocated for
1934 * a packet equals the number of segments of that packet
1935 * plus the number of context descriptors, if needed.
1937 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1938 tx_last = (uint16_t)(tx_id + nb_used - 1);
1941 if (tx_last >= txq->nb_tx_desc)
1942 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1944 if (nb_used > txq->nb_tx_free) {
1945 if (ice_xmit_cleanup(txq) != 0) {
1950 if (unlikely(nb_used > txq->tx_rs_thresh)) {
1951 while (nb_used > txq->nb_tx_free) {
1952 if (ice_xmit_cleanup(txq) != 0) {
1961 /* Descriptor based VLAN insertion */
1962 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
1963 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
1964 td_tag = tx_pkt->vlan_tci;
1967 /* Enable checksum offloading */
1968 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
1969 ice_txd_enable_checksum(ol_flags, &td_cmd,
1970 &td_offset, tx_offload);
1974 /* Setup TX context descriptor if required */
1975 volatile struct ice_tx_ctx_desc *ctx_txd =
1976 (volatile struct ice_tx_ctx_desc *)
1978 uint16_t cd_l2tag2 = 0;
1979 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
1981 txn = &sw_ring[txe->next_id];
1982 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1984 rte_pktmbuf_free_seg(txe->mbuf);
1988 if (ol_flags & PKT_TX_TCP_SEG)
1989 cd_type_cmd_tso_mss |=
1990 ice_set_tso_ctx(tx_pkt, tx_offload);
1992 /* TX context descriptor based double VLAN insert */
1993 if (ol_flags & PKT_TX_QINQ) {
1994 cd_l2tag2 = tx_pkt->vlan_tci_outer;
1995 cd_type_cmd_tso_mss |=
1996 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
1997 ICE_TXD_CTX_QW1_CMD_S);
1999 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2001 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2003 txe->last_id = tx_last;
2004 tx_id = txe->next_id;
2010 txd = &tx_ring[tx_id];
2011 txn = &sw_ring[txe->next_id];
2014 rte_pktmbuf_free_seg(txe->mbuf);
2017 /* Setup TX Descriptor */
2018 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2019 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2020 txd->cmd_type_offset_bsz =
2021 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2022 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2023 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2024 ((uint64_t)m_seg->data_len <<
2025 ICE_TXD_QW1_TX_BUF_SZ_S) |
2026 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2028 txe->last_id = tx_last;
2029 tx_id = txe->next_id;
2031 m_seg = m_seg->next;
2034 /* fill the last descriptor with End of Packet (EOP) bit */
2035 td_cmd |= ICE_TX_DESC_CMD_EOP;
2036 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2037 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2039 /* set RS bit on the last descriptor of one packet */
2040 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2041 PMD_TX_FREE_LOG(DEBUG,
2042 "Setting RS bit on TXD id="
2043 "%4u (port=%d queue=%d)",
2044 tx_last, txq->port_id, txq->queue_id);
2046 td_cmd |= ICE_TX_DESC_CMD_RS;
2048 /* Update txq RS bit counters */
2049 txq->nb_tx_used = 0;
2051 txd->cmd_type_offset_bsz |=
2052 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2058 /* update Tail register */
2059 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2060 txq->tx_tail = tx_id;
2065 static inline int __attribute__((always_inline))
2066 ice_tx_free_bufs(struct ice_tx_queue *txq)
2068 struct ice_tx_entry *txep;
2071 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2072 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2073 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2076 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2078 for (i = 0; i < txq->tx_rs_thresh; i++)
2079 rte_prefetch0((txep + i)->mbuf);
2081 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2082 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2083 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2087 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2088 rte_pktmbuf_free_seg(txep->mbuf);
2093 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2094 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2095 if (txq->tx_next_dd >= txq->nb_tx_desc)
2096 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2098 return txq->tx_rs_thresh;
2101 /* Populate 4 descriptors with data from 4 mbufs */
2103 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2108 for (i = 0; i < 4; i++, txdp++, pkts++) {
2109 dma_addr = rte_mbuf_data_iova(*pkts);
2110 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2111 txdp->cmd_type_offset_bsz =
2112 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2113 (*pkts)->data_len, 0);
2117 /* Populate 1 descriptor with data from 1 mbuf */
2119 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2123 dma_addr = rte_mbuf_data_iova(*pkts);
2124 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2125 txdp->cmd_type_offset_bsz =
2126 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2127 (*pkts)->data_len, 0);
2131 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2134 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2135 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2136 const int N_PER_LOOP = 4;
2137 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2138 int mainpart, leftover;
2142 * Process most of the packets in chunks of N pkts. Any
2143 * leftover packets will get processed one at a time.
2145 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2146 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
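/* e.g. nb_pkts = 13 yields mainpart = 12 and leftover = 1 */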
2147 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2148 /* Copy N mbuf pointers to the S/W ring */
2149 for (j = 0; j < N_PER_LOOP; ++j)
2150 (txep + i + j)->mbuf = *(pkts + i + j);
2151 tx4(txdp + i, pkts + i);
2154 if (unlikely(leftover > 0)) {
2155 for (i = 0; i < leftover; ++i) {
2156 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2157 tx1(txdp + mainpart + i, pkts + mainpart + i);
2162 static inline uint16_t
2163 tx_xmit_pkts(struct ice_tx_queue *txq,
2164 struct rte_mbuf **tx_pkts,
2167 volatile struct ice_tx_desc *txr = txq->tx_ring;
2171 * Begin scanning the H/W ring for done descriptors when the number
2172 * of available descriptors drops below tx_free_thresh. For each done
2173 * descriptor, free the associated buffer.
2175 if (txq->nb_tx_free < txq->tx_free_thresh)
2176 ice_tx_free_bufs(txq);
2178 /* Use only the available descriptors */
2179 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2180 if (unlikely(!nb_pkts))
2183 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2184 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2185 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2186 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2187 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2188 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2190 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2194 /* Fill hardware descriptor ring with mbuf data */
2195 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2196 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2198 /* Determine whether the RS bit needs to be set */
2199 if (txq->tx_tail > txq->tx_next_rs) {
2200 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2201 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2204 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2205 if (txq->tx_next_rs >= txq->nb_tx_desc)
2206 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2209 if (txq->tx_tail >= txq->nb_tx_desc)
2212 /* Update the tx tail register */
2214 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2220 ice_xmit_pkts_simple(void *tx_queue,
2221 struct rte_mbuf **tx_pkts,
2226 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2227 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2231 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2234 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2235 &tx_pkts[nb_tx], num);
2236 nb_tx = (uint16_t)(nb_tx + ret);
2237 nb_pkts = (uint16_t)(nb_pkts - ret);
2245 void __attribute__((cold))
2246 ice_set_rx_function(struct rte_eth_dev *dev)
2248 PMD_INIT_FUNC_TRACE();
2249 struct ice_adapter *ad =
2250 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2252 struct ice_rx_queue *rxq;
2254 bool use_avx2 = false;
2256 if (!ice_rx_vec_dev_check(dev)) {
2257 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2258 rxq = dev->data->rx_queues[i];
2259 (void)ice_rxq_vec_setup(rxq);
2260 }
2261
2262 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2263 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2264 use_avx2 = true;
2265
2266 if (dev->data->scattered_rx) {
2267 PMD_DRV_LOG(DEBUG,
2268 "Using %sVector Scattered Rx (port %d).",
2269 use_avx2 ? "avx2 " : "",
2270 dev->data->port_id);
2271 dev->rx_pkt_burst = use_avx2 ?
2272 ice_recv_scattered_pkts_vec_avx2 :
2273 ice_recv_scattered_pkts_vec;
2274 } else {
2275 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2276 use_avx2 ? "avx2 " : "",
2277 dev->data->port_id);
2278 dev->rx_pkt_burst = use_avx2 ?
2279 ice_recv_pkts_vec_avx2 :
2280 ice_recv_pkts_vec;
2281 }
2282
2283 return;
2284 }
2285 #endif
2286
2287 if (dev->data->scattered_rx) {
2288 /* Set the non-LRO scattered function */
2290 "Using a Scattered function on port %d.",
2291 dev->data->port_id);
2292 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2293 } else if (ad->rx_bulk_alloc_allowed) {
2295 "Rx Burst Bulk Alloc Preconditions are "
2296 "satisfied. Rx Burst Bulk Alloc function "
2297 "will be used on port %d.",
2298 dev->data->port_id);
2299 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2302 "Rx Burst Bulk Alloc Preconditions are not "
2303 "satisfied, Normal Rx will be used on port %d.",
2304 dev->data->port_id);
2305 dev->rx_pkt_burst = ice_recv_pkts;
2306 }
2307 }
2308
2309 void __attribute__((cold))
2310 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
2311 {
2312 struct ice_adapter *ad =
2313 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2315 /* Use a simple Tx queue if possible (only fast free is allowed) */
2316 ad->tx_simple_allowed =
2317 (txq->offloads ==
2318 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
2319 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
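/* i.e. the queue qualifies only if MBUF_FAST_FREE is its sole Tx
 * offload and the RS threshold covers a full simple-path burst.
 */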
2321 if (ad->tx_simple_allowed)
2322 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
2326 "Simple Tx can NOT be enabled on Tx queue %u.",
2330 /*********************************************************************
2331 *
2332 * TX prep functions
2333 *
2334 **********************************************************************/
2335 /* The default values of TSO MSS */
2336 #define ICE_MIN_TSO_MSS 64
2337 #define ICE_MAX_TSO_MSS 9728
2338 #define ICE_MAX_TSO_FRAME_SIZE 262144
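/* Burst validation hook (tx_pkt_prepare): reject TSO requests whose MSS
 * or total frame length falls outside the limits above, then fix up
 * checksum fields for the offloads each packet requests.
 */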
2339 uint16_t
2340 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2341 uint16_t nb_pkts)
2342 {
2343 int i, ret;
2344 uint64_t ol_flags;
2345 struct rte_mbuf *m;
2346
2347 for (i = 0; i < nb_pkts; i++) {
2348 m = tx_pkts[i];
2349 ol_flags = m->ol_flags;
2350
2351 if (ol_flags & PKT_TX_TCP_SEG &&
2352 (m->tso_segsz < ICE_MIN_TSO_MSS ||
2353 m->tso_segsz > ICE_MAX_TSO_MSS ||
2354 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
2355 /**
2356 * An MSS outside the supported range is considered malicious
2357 */
2358 rte_errno = EINVAL; /* rte_errno takes positive errno values */
2359 return i;
2360 }
2361
2362 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2363 ret = rte_validate_tx_offload(m);
2364 if (ret != 0) {
2365 rte_errno = -ret; /* ret is a negative errno on failure */
2366 return i;
2367 }
2368 #endif
2369 ret = rte_net_intel_cksum_prepare(m);
2370 if (ret != 0) {
2371 rte_errno = -ret; /* ret is a negative errno on failure */
2372 return i;
2373 }
2374 }
2375
2376 return nb_pkts;
2377 }
2378 void __attribute__((cold))
2379 ice_set_tx_function(struct rte_eth_dev *dev)
2380 {
2381 struct ice_adapter *ad =
2382 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
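/* As on the Rx side, prefer a vector Tx path whenever every queue
 * passes the vector prerequisites.
 */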
2383 #ifdef RTE_ARCH_X86
2384 struct ice_tx_queue *txq;
2385 int i;
2386 bool use_avx2 = false;
2388 if (!ice_tx_vec_dev_check(dev)) {
2389 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2390 txq = dev->data->tx_queues[i];
2391 (void)ice_txq_vec_setup(txq);
2392 }
2393
2394 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2395 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2396 use_avx2 = true;
2397
2398 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2399 use_avx2 ? "avx2 " : "",
2400 dev->data->port_id);
2401 dev->tx_pkt_burst = use_avx2 ?
2402 ice_xmit_pkts_vec_avx2 :
2403 ice_xmit_pkts_vec;
2404 dev->tx_pkt_prepare = NULL;
2405
2406 return;
2407 }
2408 #endif
2409
2410 if (ad->tx_simple_allowed) {
2411 PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
2412 dev->tx_pkt_burst = ice_xmit_pkts_simple;
2413 dev->tx_pkt_prepare = NULL;
2415 PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
2416 dev->tx_pkt_burst = ice_xmit_pkts;
2417 dev->tx_pkt_prepare = ice_prep_pkts;
2421 /* The hardware datasheet gives the detailed meaning of each ptype value.
2422 *
2423 * @note: fix ice_dev_supported_ptypes_get() if any change here.
2424 */
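/* On the Rx paths this table turns the raw hardware ptype index taken
 * from each descriptor into an mbuf packet_type in a single lookup,
 * along the lines of: mb->packet_type = ptype_tbl[hw_ptype];
 */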
2425 static inline uint32_t
2426 ice_get_default_pkt_type(uint16_t ptype)
2427 {
2428 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
2429 __rte_cache_aligned = {
2430 /* L2 types */
2431 /* [0] reserved */
2432 [1] = RTE_PTYPE_L2_ETHER,
2433 /* [2] - [5] reserved */
2434 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2435 /* [7] - [10] reserved */
2436 [11] = RTE_PTYPE_L2_ETHER_ARP,
2437 /* [12] - [21] reserved */
2439 /* Non tunneled IPv4 */
2440 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2441 RTE_PTYPE_L4_FRAG,
2442 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2443 RTE_PTYPE_L4_NONFRAG,
2444 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2445 RTE_PTYPE_L4_UDP,
2446 /* [25] reserved */
2447 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2448 RTE_PTYPE_L4_TCP,
2449 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2450 RTE_PTYPE_L4_SCTP,
2451 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2452 RTE_PTYPE_L4_ICMP,
2453
2454 /* IPv4 --> IPv4 */
2455 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2456 RTE_PTYPE_TUNNEL_IP |
2457 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2458 RTE_PTYPE_INNER_L4_FRAG,
2459 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2460 RTE_PTYPE_TUNNEL_IP |
2461 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2462 RTE_PTYPE_INNER_L4_NONFRAG,
2463 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2464 RTE_PTYPE_TUNNEL_IP |
2465 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2466 RTE_PTYPE_INNER_L4_UDP,
2467 /* [32] reserved */
2468 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2469 RTE_PTYPE_TUNNEL_IP |
2470 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2471 RTE_PTYPE_INNER_L4_TCP,
2472 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2473 RTE_PTYPE_TUNNEL_IP |
2474 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2475 RTE_PTYPE_INNER_L4_SCTP,
2476 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2477 RTE_PTYPE_TUNNEL_IP |
2478 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2479 RTE_PTYPE_INNER_L4_ICMP,
2480
2481 /* IPv4 --> IPv6 */
2482 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2483 RTE_PTYPE_TUNNEL_IP |
2484 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2485 RTE_PTYPE_INNER_L4_FRAG,
2486 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2487 RTE_PTYPE_TUNNEL_IP |
2488 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2489 RTE_PTYPE_INNER_L4_NONFRAG,
2490 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2491 RTE_PTYPE_TUNNEL_IP |
2492 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2493 RTE_PTYPE_INNER_L4_UDP,
2494 /* [39] reserved */
2495 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2496 RTE_PTYPE_TUNNEL_IP |
2497 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2498 RTE_PTYPE_INNER_L4_TCP,
2499 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2500 RTE_PTYPE_TUNNEL_IP |
2501 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2502 RTE_PTYPE_INNER_L4_SCTP,
2503 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2504 RTE_PTYPE_TUNNEL_IP |
2505 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2506 RTE_PTYPE_INNER_L4_ICMP,
2508 /* IPv4 --> GRE/Teredo/VXLAN */
2509 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2510 RTE_PTYPE_TUNNEL_GRENAT,
2512 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2513 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2514 RTE_PTYPE_TUNNEL_GRENAT |
2515 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2516 RTE_PTYPE_INNER_L4_FRAG,
2517 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2518 RTE_PTYPE_TUNNEL_GRENAT |
2519 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2520 RTE_PTYPE_INNER_L4_NONFRAG,
2521 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2522 RTE_PTYPE_TUNNEL_GRENAT |
2523 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2524 RTE_PTYPE_INNER_L4_UDP,
2525 /* [47] reserved */
2526 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2527 RTE_PTYPE_TUNNEL_GRENAT |
2528 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2529 RTE_PTYPE_INNER_L4_TCP,
2530 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2531 RTE_PTYPE_TUNNEL_GRENAT |
2532 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2533 RTE_PTYPE_INNER_L4_SCTP,
2534 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2535 RTE_PTYPE_TUNNEL_GRENAT |
2536 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2537 RTE_PTYPE_INNER_L4_ICMP,
2539 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2540 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2541 RTE_PTYPE_TUNNEL_GRENAT |
2542 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2543 RTE_PTYPE_INNER_L4_FRAG,
2544 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2545 RTE_PTYPE_TUNNEL_GRENAT |
2546 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2547 RTE_PTYPE_INNER_L4_NONFRAG,
2548 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2549 RTE_PTYPE_TUNNEL_GRENAT |
2550 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2551 RTE_PTYPE_INNER_L4_UDP,
2552 /* [54] reserved */
2553 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2554 RTE_PTYPE_TUNNEL_GRENAT |
2555 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2556 RTE_PTYPE_INNER_L4_TCP,
2557 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2558 RTE_PTYPE_TUNNEL_GRENAT |
2559 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2560 RTE_PTYPE_INNER_L4_SCTP,
2561 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2562 RTE_PTYPE_TUNNEL_GRENAT |
2563 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2564 RTE_PTYPE_INNER_L4_ICMP,
2566 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2567 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2568 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2570 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2571 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2572 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2573 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2574 RTE_PTYPE_INNER_L4_FRAG,
2575 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2576 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2577 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2578 RTE_PTYPE_INNER_L4_NONFRAG,
2579 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2580 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2581 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2582 RTE_PTYPE_INNER_L4_UDP,
2583 /* [62] reserved */
2584 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2585 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2586 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2587 RTE_PTYPE_INNER_L4_TCP,
2588 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2589 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2590 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2591 RTE_PTYPE_INNER_L4_SCTP,
2592 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2593 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2594 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2595 RTE_PTYPE_INNER_L4_ICMP,
2597 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2598 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2599 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2600 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2601 RTE_PTYPE_INNER_L4_FRAG,
2602 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2603 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2604 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2605 RTE_PTYPE_INNER_L4_NONFRAG,
2606 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2607 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2608 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2609 RTE_PTYPE_INNER_L4_UDP,
2610 /* [69] reserved */
2611 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2612 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2613 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2614 RTE_PTYPE_INNER_L4_TCP,
2615 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2616 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2617 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2618 RTE_PTYPE_INNER_L4_SCTP,
2619 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2620 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2621 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2622 RTE_PTYPE_INNER_L4_ICMP,
2624 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2625 [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2626 RTE_PTYPE_TUNNEL_GRENAT |
2627 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2629 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2630 [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2631 RTE_PTYPE_TUNNEL_GRENAT |
2632 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2633 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2634 RTE_PTYPE_INNER_L4_FRAG,
2635 [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2636 RTE_PTYPE_TUNNEL_GRENAT |
2637 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2638 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2639 RTE_PTYPE_INNER_L4_NONFRAG,
2640 [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2641 RTE_PTYPE_TUNNEL_GRENAT |
2642 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2643 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2644 RTE_PTYPE_INNER_L4_UDP,
2645 /* [77] reserved */
2646 [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2647 RTE_PTYPE_TUNNEL_GRENAT |
2648 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2649 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2650 RTE_PTYPE_INNER_L4_TCP,
2651 [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2652 RTE_PTYPE_TUNNEL_GRENAT |
2653 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2654 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2655 RTE_PTYPE_INNER_L4_SCTP,
2656 [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2657 RTE_PTYPE_TUNNEL_GRENAT |
2658 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2659 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2660 RTE_PTYPE_INNER_L4_ICMP,
2662 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2663 [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2664 RTE_PTYPE_TUNNEL_GRENAT |
2665 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2666 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2667 RTE_PTYPE_INNER_L4_FRAG,
2668 [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2669 RTE_PTYPE_TUNNEL_GRENAT |
2670 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2671 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2672 RTE_PTYPE_INNER_L4_NONFRAG,
2673 [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2674 RTE_PTYPE_TUNNEL_GRENAT |
2675 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2676 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2677 RTE_PTYPE_INNER_L4_UDP,
2678 /* [84] reserved */
2679 [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2680 RTE_PTYPE_TUNNEL_GRENAT |
2681 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2682 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2683 RTE_PTYPE_INNER_L4_TCP,
2684 [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2685 RTE_PTYPE_TUNNEL_GRENAT |
2686 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2687 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2688 RTE_PTYPE_INNER_L4_SCTP,
2689 [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2690 RTE_PTYPE_TUNNEL_GRENAT |
2691 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2692 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2693 RTE_PTYPE_INNER_L4_ICMP,
2695 /* Non tunneled IPv6 */
2696 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2697 RTE_PTYPE_L4_FRAG,
2698 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2699 RTE_PTYPE_L4_NONFRAG,
2700 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2701 RTE_PTYPE_L4_UDP,
2702 /* [91] reserved */
2703 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2704 RTE_PTYPE_L4_TCP,
2705 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2706 RTE_PTYPE_L4_SCTP,
2707 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2708 RTE_PTYPE_L4_ICMP,
2709
2710 /* IPv6 --> IPv4 */
2711 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2712 RTE_PTYPE_TUNNEL_IP |
2713 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2714 RTE_PTYPE_INNER_L4_FRAG,
2715 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2716 RTE_PTYPE_TUNNEL_IP |
2717 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2718 RTE_PTYPE_INNER_L4_NONFRAG,
2719 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2720 RTE_PTYPE_TUNNEL_IP |
2721 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2722 RTE_PTYPE_INNER_L4_UDP,
2723 /* [98] reserved */
2724 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2725 RTE_PTYPE_TUNNEL_IP |
2726 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2727 RTE_PTYPE_INNER_L4_TCP,
2728 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2729 RTE_PTYPE_TUNNEL_IP |
2730 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2731 RTE_PTYPE_INNER_L4_SCTP,
2732 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2733 RTE_PTYPE_TUNNEL_IP |
2734 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2735 RTE_PTYPE_INNER_L4_ICMP,
2736
2737 /* IPv6 --> IPv6 */
2738 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2739 RTE_PTYPE_TUNNEL_IP |
2740 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2741 RTE_PTYPE_INNER_L4_FRAG,
2742 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2743 RTE_PTYPE_TUNNEL_IP |
2744 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2745 RTE_PTYPE_INNER_L4_NONFRAG,
2746 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2747 RTE_PTYPE_TUNNEL_IP |
2748 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2749 RTE_PTYPE_INNER_L4_UDP,
2750 /* [105] reserved */
2751 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2752 RTE_PTYPE_TUNNEL_IP |
2753 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2754 RTE_PTYPE_INNER_L4_TCP,
2755 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2756 RTE_PTYPE_TUNNEL_IP |
2757 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2758 RTE_PTYPE_INNER_L4_SCTP,
2759 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2760 RTE_PTYPE_TUNNEL_IP |
2761 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2762 RTE_PTYPE_INNER_L4_ICMP,
2764 /* IPv6 --> GRE/Teredo/VXLAN */
2765 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2766 RTE_PTYPE_TUNNEL_GRENAT,
2768 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2769 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2770 RTE_PTYPE_TUNNEL_GRENAT |
2771 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2772 RTE_PTYPE_INNER_L4_FRAG,
2773 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2774 RTE_PTYPE_TUNNEL_GRENAT |
2775 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2776 RTE_PTYPE_INNER_L4_NONFRAG,
2777 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2778 RTE_PTYPE_TUNNEL_GRENAT |
2779 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2780 RTE_PTYPE_INNER_L4_UDP,
2781 /* [113] reserved */
2782 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2783 RTE_PTYPE_TUNNEL_GRENAT |
2784 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2785 RTE_PTYPE_INNER_L4_TCP,
2786 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2787 RTE_PTYPE_TUNNEL_GRENAT |
2788 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2789 RTE_PTYPE_INNER_L4_SCTP,
2790 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2791 RTE_PTYPE_TUNNEL_GRENAT |
2792 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2793 RTE_PTYPE_INNER_L4_ICMP,
2795 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2796 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2797 RTE_PTYPE_TUNNEL_GRENAT |
2798 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2799 RTE_PTYPE_INNER_L4_FRAG,
2800 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2801 RTE_PTYPE_TUNNEL_GRENAT |
2802 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2803 RTE_PTYPE_INNER_L4_NONFRAG,
2804 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2805 RTE_PTYPE_TUNNEL_GRENAT |
2806 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2807 RTE_PTYPE_INNER_L4_UDP,
2808 /* [120] reserved */
2809 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2810 RTE_PTYPE_TUNNEL_GRENAT |
2811 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2812 RTE_PTYPE_INNER_L4_TCP,
2813 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2814 RTE_PTYPE_TUNNEL_GRENAT |
2815 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2816 RTE_PTYPE_INNER_L4_SCTP,
2817 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2818 RTE_PTYPE_TUNNEL_GRENAT |
2819 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2820 RTE_PTYPE_INNER_L4_ICMP,
2822 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2823 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2824 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2826 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2827 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2828 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2829 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2830 RTE_PTYPE_INNER_L4_FRAG,
2831 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2832 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2833 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2834 RTE_PTYPE_INNER_L4_NONFRAG,
2835 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2836 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2837 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2838 RTE_PTYPE_INNER_L4_UDP,
2839 /* [128] reserved */
2840 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2841 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2842 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2843 RTE_PTYPE_INNER_L4_TCP,
2844 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2845 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2846 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2847 RTE_PTYPE_INNER_L4_SCTP,
2848 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2849 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2850 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2851 RTE_PTYPE_INNER_L4_ICMP,
2853 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2854 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2855 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2856 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2857 RTE_PTYPE_INNER_L4_FRAG,
2858 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2859 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2860 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2861 RTE_PTYPE_INNER_L4_NONFRAG,
2862 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2863 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2864 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2865 RTE_PTYPE_INNER_L4_UDP,
2866 /* [135] reserved */
2867 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2868 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2869 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2870 RTE_PTYPE_INNER_L4_TCP,
2871 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2872 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2873 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2874 RTE_PTYPE_INNER_L4_SCTP,
2875 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2876 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2877 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2878 RTE_PTYPE_INNER_L4_ICMP,
2880 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2881 [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2882 RTE_PTYPE_TUNNEL_GRENAT |
2883 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2885 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2886 [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2887 RTE_PTYPE_TUNNEL_GRENAT |
2888 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2889 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2890 RTE_PTYPE_INNER_L4_FRAG,
2891 [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2892 RTE_PTYPE_TUNNEL_GRENAT |
2893 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2894 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2895 RTE_PTYPE_INNER_L4_NONFRAG,
2896 [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2897 RTE_PTYPE_TUNNEL_GRENAT |
2898 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2899 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2900 RTE_PTYPE_INNER_L4_UDP,
2901 /* [143] reserved */
2902 [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2903 RTE_PTYPE_TUNNEL_GRENAT |
2904 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2905 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2906 RTE_PTYPE_INNER_L4_TCP,
2907 [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2908 RTE_PTYPE_TUNNEL_GRENAT |
2909 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2910 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2911 RTE_PTYPE_INNER_L4_SCTP,
2912 [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2913 RTE_PTYPE_TUNNEL_GRENAT |
2914 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2915 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2916 RTE_PTYPE_INNER_L4_ICMP,
2918 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2919 [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2920 RTE_PTYPE_TUNNEL_GRENAT |
2921 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2922 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2923 RTE_PTYPE_INNER_L4_FRAG,
2924 [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2925 RTE_PTYPE_TUNNEL_GRENAT |
2926 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2927 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2928 RTE_PTYPE_INNER_L4_NONFRAG,
2929 [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2930 RTE_PTYPE_TUNNEL_GRENAT |
2931 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2932 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2933 RTE_PTYPE_INNER_L4_UDP,
2934 /* [150] reserved */
2935 [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2936 RTE_PTYPE_TUNNEL_GRENAT |
2937 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2938 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2939 RTE_PTYPE_INNER_L4_TCP,
2940 [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2941 RTE_PTYPE_TUNNEL_GRENAT |
2942 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2943 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2944 RTE_PTYPE_INNER_L4_SCTP,
2945 [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2946 RTE_PTYPE_TUNNEL_GRENAT |
2947 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2948 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2949 RTE_PTYPE_INNER_L4_ICMP,
2950 /* [154] - [255] reserved */
2951 [256] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2952 RTE_PTYPE_TUNNEL_GTPC,
2953 [257] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2954 RTE_PTYPE_TUNNEL_GTPC,
2955 [258] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2956 RTE_PTYPE_TUNNEL_GTPU,
2957 [259] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2958 RTE_PTYPE_TUNNEL_GTPU,
2959 /* [260] - [263] reserved */
2960 [264] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2961 RTE_PTYPE_TUNNEL_GTPC,
2962 [265] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2963 RTE_PTYPE_TUNNEL_GTPC,
2964 [266] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2965 RTE_PTYPE_TUNNEL_GTPU,
2966 [267] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2967 RTE_PTYPE_TUNNEL_GTPU,
2969 /* All others reserved */
2970 };
2971
2972 return type_table[ptype];
2973 }
2974
2975 void __attribute__((cold))
2976 ice_set_default_ptype_table(struct rte_eth_dev *dev)
2977 {
2978 struct ice_adapter *ad =
2979 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2980 int i;
2981
2982 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
2983 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
2984 }