1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
10 #define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
12 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
16 PKT_TX_OUTER_IP_CKSUM)
18 #define ICE_RX_ERR_BITS 0x3f
20 static enum ice_status
21 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
23 struct ice_vsi *vsi = rxq->vsi;
24 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
25 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
26 struct ice_rlan_ctx rx_ctx;
28 uint16_t buf_size, len;
29 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
33 * The kernel driver uses the flex descriptor format and sets the
34 * register to flex descriptor mode.
35 * DPDK uses the legacy descriptor format, so the register should be
36 * set back to its default value before legacy descriptor mode is used.
38 regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
39 QRXFLXP_CNTXT_RXDID_PRIO_M;
40 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
42 /* Set buffer size as the head split is disabled. */
43 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
44 RTE_PKTMBUF_HEADROOM);
46 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
47 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
48 rxq->max_pkt_len = RTE_MIN(len,
49 dev->data->dev_conf.rxmode.max_rx_pkt_len);
51 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
52 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
53 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
54 PMD_DRV_LOG(ERR, "maximum packet length must "
55 "be larger than %u and smaller than %u, "
56 "as jumbo frame is enabled",
57 (uint32_t)ETHER_MAX_LEN,
58 (uint32_t)ICE_FRAME_SIZE_MAX);
62 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
63 rxq->max_pkt_len > ETHER_MAX_LEN) {
64 PMD_DRV_LOG(ERR, "maximum packet length must be "
65 "larger than %u and smaller than %u, "
66 "as jumbo frame is disabled",
67 (uint32_t)ETHER_MIN_LEN,
68 (uint32_t)ETHER_MAX_LEN);
73 memset(&rx_ctx, 0, sizeof(rx_ctx));
75 rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
76 rx_ctx.qlen = rxq->nb_rx_desc;
77 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
78 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
79 rx_ctx.dtype = 0; /* No Header Split mode */
80 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
81 rx_ctx.dsize = 1; /* 32B descriptors */
83 rx_ctx.rxmax = rxq->max_pkt_len;
84 /* TPH: Transaction Layer Packet (TLP) processing hints */
85 rx_ctx.tphrdesc_ena = 1;
86 rx_ctx.tphwdesc_ena = 1;
87 rx_ctx.tphdata_ena = 1;
88 rx_ctx.tphhead_ena = 1;
89 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
90 * When the number of free descriptors goes below the lrxqthresh,
91 * an immediate interrupt is triggered.
93 rx_ctx.lrxqthresh = 2;
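/* A value of 2 thus corresponds to a threshold of 2 * 64 = 128 free descriptors. */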
94 /* Default: use 32-byte descriptors; VLAN tag is extracted to L2TAG2 (1st) */
97 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
99 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
101 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
105 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
107 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
112 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
113 RTE_PKTMBUF_HEADROOM);
115 /* Check if scattered RX needs to be used. */
116 if ((rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)
117 dev->data->scattered_rx = 1;
119 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
121 /* Init the Rx tail register */
122 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
127 /* Allocate mbufs for all descriptors in rx queue */
129 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
131 struct ice_rx_entry *rxe = rxq->sw_ring;
135 for (i = 0; i < rxq->nb_rx_desc; i++) {
136 volatile union ice_rx_desc *rxd;
137 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
139 if (unlikely(!mbuf)) {
140 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
144 rte_mbuf_refcnt_set(mbuf, 1);
146 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
148 mbuf->port = rxq->port_id;
151 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
153 rxd = &rxq->rx_ring[i];
154 rxd->read.pkt_addr = dma_addr;
155 rxd->read.hdr_addr = 0;
156 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
166 /* Free all mbufs for descriptors in rx queue */
168 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
172 if (!rxq || !rxq->sw_ring) {
173 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
177 for (i = 0; i < rxq->nb_rx_desc; i++) {
178 if (rxq->sw_ring[i].mbuf) {
179 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
180 rxq->sw_ring[i].mbuf = NULL;
183 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
184 if (rxq->rx_nb_avail == 0)
186 for (i = 0; i < rxq->rx_nb_avail; i++) {
187 struct rte_mbuf *mbuf;
189 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
190 rte_pktmbuf_free_seg(mbuf);
192 rxq->rx_nb_avail = 0;
193 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
197 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
199 rxq->rx_rel_mbufs(rxq);
202 /* Turn on or off an Rx queue
203 * @q_idx: queue index in PF scope
204 * @on: turn on or off the queue
207 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
212 /* QRX_CTRL = QRX_ENA */
213 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
216 if (reg & QRX_CTRL_QENA_STAT_M)
217 return 0; /* Already on, skip */
218 reg |= QRX_CTRL_QENA_REQ_M;
220 if (!(reg & QRX_CTRL_QENA_STAT_M))
221 return 0; /* Already off, skip */
222 reg &= ~QRX_CTRL_QENA_REQ_M;
225 /* Write the register */
226 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
227 /* Check the result. It is said that QENA_STAT
228 * follows QENA_REQ by no more than 10 us.
229 * TODO: need to change the wait counter later
231 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
232 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
233 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
235 if ((reg & QRX_CTRL_QENA_REQ_M) &&
236 (reg & QRX_CTRL_QENA_STAT_M))
239 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
240 !(reg & QRX_CTRL_QENA_STAT_M))
245 /* Check if the wait timed out */
246 if (j >= ICE_CHK_Q_ENA_COUNT) {
247 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
248 (on ? "enable" : "disable"), q_idx);
256 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
257 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
259 ice_check_rx_burst_bulk_alloc_preconditions
260 (__rte_unused struct ice_rx_queue *rxq)
265 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
266 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
267 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
268 "rxq->rx_free_thresh=%d, "
269 "ICE_RX_MAX_BURST=%d",
270 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
272 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
273 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
274 "rxq->rx_free_thresh=%d, "
275 "rxq->nb_rx_desc=%d",
276 rxq->rx_free_thresh, rxq->nb_rx_desc);
278 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
279 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
280 "rxq->nb_rx_desc=%d, "
281 "rxq->rx_free_thresh=%d",
282 rxq->nb_rx_desc, rxq->rx_free_thresh);
292 /* reset fields in ice_rx_queue back to default */
294 ice_reset_rx_queue(struct ice_rx_queue *rxq)
300 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
304 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
305 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
306 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
308 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
309 len = rxq->nb_rx_desc;
311 for (i = 0; i < len * sizeof(union ice_rx_desc); i++)
312 ((volatile char *)rxq->rx_ring)[i] = 0;
314 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
315 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
316 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
317 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
319 rxq->rx_nb_avail = 0;
320 rxq->rx_next_avail = 0;
321 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
322 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
326 rxq->pkt_first_seg = NULL;
327 rxq->pkt_last_seg = NULL;
331 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
333 struct ice_rx_queue *rxq;
335 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
337 PMD_INIT_FUNC_TRACE();
339 if (rx_queue_id >= dev->data->nb_rx_queues) {
340 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
341 rx_queue_id, dev->data->nb_rx_queues);
345 rxq = dev->data->rx_queues[rx_queue_id];
346 if (!rxq || !rxq->q_set) {
347 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
352 err = ice_program_hw_rx_queue(rxq);
354 PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
359 err = ice_alloc_rx_queue_mbufs(rxq);
361 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
367 /* Init the RX tail register. */
368 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
370 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
372 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
375 ice_rx_queue_release_mbufs(rxq);
376 ice_reset_rx_queue(rxq);
380 dev->data->rx_queue_state[rx_queue_id] =
381 RTE_ETH_QUEUE_STATE_STARTED;
387 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
389 struct ice_rx_queue *rxq;
391 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
393 if (rx_queue_id < dev->data->nb_rx_queues) {
394 rxq = dev->data->rx_queues[rx_queue_id];
396 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
398 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
402 ice_rx_queue_release_mbufs(rxq);
403 ice_reset_rx_queue(rxq);
404 dev->data->rx_queue_state[rx_queue_id] =
405 RTE_ETH_QUEUE_STATE_STOPPED;
412 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
414 struct ice_tx_queue *txq;
418 struct ice_aqc_add_tx_qgrp txq_elem;
419 struct ice_tlan_ctx tx_ctx;
421 PMD_INIT_FUNC_TRACE();
423 if (tx_queue_id >= dev->data->nb_tx_queues) {
424 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
425 tx_queue_id, dev->data->nb_tx_queues);
429 txq = dev->data->tx_queues[tx_queue_id];
430 if (!txq || !txq->q_set) {
431 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
437 hw = ICE_VSI_TO_HW(vsi);
439 memset(&txq_elem, 0, sizeof(txq_elem));
440 memset(&tx_ctx, 0, sizeof(tx_ctx));
441 txq_elem.num_txqs = 1;
442 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
444 tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
445 tx_ctx.qlen = txq->nb_tx_desc;
446 tx_ctx.pf_num = hw->pf_id;
447 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
448 tx_ctx.src_vsi = vsi->vsi_id;
449 tx_ctx.port_num = hw->port_info->lport;
450 tx_ctx.tso_ena = 1; /* tso enable */
451 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
452 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
454 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
457 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
459 /* Init the Tx tail register */
460 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
462 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
463 sizeof(txq_elem), NULL);
465 PMD_DRV_LOG(ERR, "Failed to add lan txq");
468 /* store the schedule node id */
469 txq->q_teid = txq_elem.txqs[0].q_teid;
471 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
475 /* Free all mbufs for descriptors in tx queue */
477 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
481 if (!txq || !txq->sw_ring) {
482 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
486 for (i = 0; i < txq->nb_tx_desc; i++) {
487 if (txq->sw_ring[i].mbuf) {
488 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
489 txq->sw_ring[i].mbuf = NULL;
494 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
496 txq->tx_rel_mbufs(txq);
500 ice_reset_tx_queue(struct ice_tx_queue *txq)
502 struct ice_tx_entry *txe;
503 uint16_t i, prev, size;
506 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
511 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
512 for (i = 0; i < size; i++)
513 ((volatile char *)txq->tx_ring)[i] = 0;
515 prev = (uint16_t)(txq->nb_tx_desc - 1);
516 for (i = 0; i < txq->nb_tx_desc; i++) {
517 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
519 txd->cmd_type_offset_bsz =
520 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
523 txe[prev].next_id = i;
527 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
528 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
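/* e.g. with tx_rs_thresh = 32, tx_next_dd and tx_next_rs both start at descriptor 31 */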
533 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
534 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
538 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
540 struct ice_tx_queue *txq;
541 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
542 enum ice_status status;
546 if (tx_queue_id >= dev->data->nb_tx_queues) {
547 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
548 tx_queue_id, dev->data->nb_tx_queues);
552 txq = dev->data->tx_queues[tx_queue_id];
554 PMD_DRV_LOG(ERR, "TX queue %u is not available",
559 q_ids[0] = txq->reg_idx;
560 q_teids[0] = txq->q_teid;
562 status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
563 ICE_NO_RESET, 0, NULL);
564 if (status != ICE_SUCCESS) {
565 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
569 ice_tx_queue_release_mbufs(txq);
570 ice_reset_tx_queue(txq);
571 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
577 ice_rx_queue_setup(struct rte_eth_dev *dev,
580 unsigned int socket_id,
581 const struct rte_eth_rxconf *rx_conf,
582 struct rte_mempool *mp)
584 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
585 struct ice_adapter *ad =
586 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
587 struct ice_vsi *vsi = pf->main_vsi;
588 struct ice_rx_queue *rxq;
589 const struct rte_memzone *rz;
592 int use_def_burst_func = 1;
594 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
595 nb_desc > ICE_MAX_RING_DESC ||
596 nb_desc < ICE_MIN_RING_DESC) {
597 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
602 /* Free memory if needed */
603 if (dev->data->rx_queues[queue_idx]) {
604 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
605 dev->data->rx_queues[queue_idx] = NULL;
608 /* Allocate the rx queue data structure */
609 rxq = rte_zmalloc_socket(NULL,
610 sizeof(struct ice_rx_queue),
614 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
615 "rx queue data structure");
619 rxq->nb_rx_desc = nb_desc;
620 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
621 rxq->queue_id = queue_idx;
623 rxq->reg_idx = vsi->base_queue + queue_idx;
624 rxq->port_id = dev->data->port_id;
625 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
626 rxq->crc_len = ETHER_CRC_LEN;
630 rxq->drop_en = rx_conf->rx_drop_en;
632 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
634 /* Allocate the maximum number of RX ring hardware descriptors. */
635 len = ICE_MAX_RING_DESC;
637 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
639 * Allocating a little more memory because vectorized/bulk_alloc Rx
640 * functions don't check boundaries each time.
642 len += ICE_RX_MAX_BURST;
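/* The matching extra sw_ring entries are pointed at fake_mbuf by
 * ice_reset_rx_queue(), so look-ahead reads past the ring end stay valid.
 */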
645 /* Allocate the maximum number of RX ring hardware descriptors. */
646 ring_size = sizeof(union ice_rx_desc) * len;
647 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
648 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
649 ring_size, ICE_RING_BASE_ALIGN,
652 ice_rx_queue_release(rxq);
653 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
657 /* Zero all the descriptors in the ring. */
658 memset(rz->addr, 0, ring_size);
660 rxq->rx_ring_phys_addr = rz->phys_addr;
661 rxq->rx_ring = (union ice_rx_desc *)rz->addr;
663 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
664 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
669 /* Allocate the software ring. */
670 rxq->sw_ring = rte_zmalloc_socket(NULL,
671 sizeof(struct ice_rx_entry) * len,
675 ice_rx_queue_release(rxq);
676 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
680 ice_reset_rx_queue(rxq);
682 dev->data->rx_queues[queue_idx] = rxq;
683 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
685 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
687 if (!use_def_burst_func) {
688 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
689 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
690 "satisfied. Rx Burst Bulk Alloc function will be "
691 "used on port=%d, queue=%d.",
692 rxq->port_id, rxq->queue_id);
693 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
695 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
696 "not satisfied, Scattered Rx is requested, "
697 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
698 "not enabled on port=%d, queue=%d.",
699 rxq->port_id, rxq->queue_id);
700 ad->rx_bulk_alloc_allowed = false;
707 ice_rx_queue_release(void *rxq)
709 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
712 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
716 ice_rx_queue_release_mbufs(q);
717 rte_free(q->sw_ring);
722 ice_tx_queue_setup(struct rte_eth_dev *dev,
725 unsigned int socket_id,
726 const struct rte_eth_txconf *tx_conf)
728 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
729 struct ice_vsi *vsi = pf->main_vsi;
730 struct ice_tx_queue *txq;
731 const struct rte_memzone *tz;
733 uint16_t tx_rs_thresh, tx_free_thresh;
736 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
738 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
739 nb_desc > ICE_MAX_RING_DESC ||
740 nb_desc < ICE_MIN_RING_DESC) {
741 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
747 * The following two parameters control the setting of the RS bit on
748 * transmit descriptors. TX descriptors will have their RS bit set
749 * after txq->tx_rs_thresh descriptors have been used. The TX
750 * descriptor ring will be cleaned after txq->tx_free_thresh
751 * descriptors are used or if the number of descriptors required to
752 * transmit a packet is greater than the number of free TX descriptors.
754 * The following constraints must be satisfied:
755 * - tx_rs_thresh must be greater than 0.
756 * - tx_rs_thresh must be less than the size of the ring minus 2.
757 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
758 * - tx_rs_thresh must be a divisor of the ring size.
759 * - tx_free_thresh must be greater than 0.
760 * - tx_free_thresh must be less than the size of the ring minus 3.
762 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
763 * race condition, hence the maximum threshold constraints. When set
764 * to zero use default values.
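 * For example, with nb_desc = 512, tx_rs_thresh = 32 and tx_free_thresh = 32
 * satisfy every constraint above: 32 > 0, 32 < 510, 32 <= 32,
 * 512 % 32 == 0 and 32 < 509.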
766 tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
767 tx_conf->tx_rs_thresh :
768 ICE_DEFAULT_TX_RSBIT_THRESH);
769 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
770 tx_conf->tx_free_thresh :
771 ICE_DEFAULT_TX_FREE_THRESH);
772 if (tx_rs_thresh >= (nb_desc - 2)) {
773 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
774 "number of TX descriptors minus 2. "
775 "(tx_rs_thresh=%u port=%d queue=%d)",
776 (unsigned int)tx_rs_thresh,
777 (int)dev->data->port_id,
781 if (tx_free_thresh >= (nb_desc - 3)) {
782 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
784 "number of TX descriptors minus 3. "
785 "(tx_free_thresh=%u port=%d queue=%d)",
786 (unsigned int)tx_free_thresh,
787 (int)dev->data->port_id,
791 if (tx_rs_thresh > tx_free_thresh) {
792 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
793 "equal to tx_free_thresh. (tx_free_thresh=%u"
794 " tx_rs_thresh=%u port=%d queue=%d)",
795 (unsigned int)tx_free_thresh,
796 (unsigned int)tx_rs_thresh,
797 (int)dev->data->port_id,
801 if ((nb_desc % tx_rs_thresh) != 0) {
802 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
803 "number of TX descriptors. (tx_rs_thresh=%u"
804 " port=%d queue=%d)",
805 (unsigned int)tx_rs_thresh,
806 (int)dev->data->port_id,
810 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
811 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
812 "tx_rs_thresh is greater than 1. "
813 "(tx_rs_thresh=%u port=%d queue=%d)",
814 (unsigned int)tx_rs_thresh,
815 (int)dev->data->port_id,
820 /* Free memory if needed. */
821 if (dev->data->tx_queues[queue_idx]) {
822 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
823 dev->data->tx_queues[queue_idx] = NULL;
826 /* Allocate the TX queue data structure. */
827 txq = rte_zmalloc_socket(NULL,
828 sizeof(struct ice_tx_queue),
832 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
833 "tx queue structure");
837 /* Allocate TX hardware ring descriptors. */
838 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
839 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
840 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
841 ring_size, ICE_RING_BASE_ALIGN,
844 ice_tx_queue_release(txq);
845 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
849 txq->nb_tx_desc = nb_desc;
850 txq->tx_rs_thresh = tx_rs_thresh;
851 txq->tx_free_thresh = tx_free_thresh;
852 txq->pthresh = tx_conf->tx_thresh.pthresh;
853 txq->hthresh = tx_conf->tx_thresh.hthresh;
854 txq->wthresh = tx_conf->tx_thresh.wthresh;
855 txq->queue_id = queue_idx;
857 txq->reg_idx = vsi->base_queue + queue_idx;
858 txq->port_id = dev->data->port_id;
859 txq->offloads = offloads;
861 txq->tx_deferred_start = tx_conf->tx_deferred_start;
863 txq->tx_ring_phys_addr = tz->phys_addr;
864 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
866 /* Allocate software ring */
868 rte_zmalloc_socket(NULL,
869 sizeof(struct ice_tx_entry) * nb_desc,
873 ice_tx_queue_release(txq);
874 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
878 ice_reset_tx_queue(txq);
880 dev->data->tx_queues[queue_idx] = txq;
881 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
887 ice_tx_queue_release(void *txq)
889 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
892 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
896 ice_tx_queue_release_mbufs(q);
897 rte_free(q->sw_ring);
902 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
903 struct rte_eth_rxq_info *qinfo)
905 struct ice_rx_queue *rxq;
907 rxq = dev->data->rx_queues[queue_id];
910 qinfo->scattered_rx = dev->data->scattered_rx;
911 qinfo->nb_desc = rxq->nb_rx_desc;
913 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
914 qinfo->conf.rx_drop_en = rxq->drop_en;
915 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
919 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
920 struct rte_eth_txq_info *qinfo)
922 struct ice_tx_queue *txq;
924 txq = dev->data->tx_queues[queue_id];
926 qinfo->nb_desc = txq->nb_tx_desc;
928 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
929 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
930 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
932 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
933 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
934 qinfo->conf.offloads = txq->offloads;
935 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
939 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
941 #define ICE_RXQ_SCAN_INTERVAL 4
942 volatile union ice_rx_desc *rxdp;
943 struct ice_rx_queue *rxq;
946 rxq = dev->data->rx_queues[rx_queue_id];
947 rxdp = &rxq->rx_ring[rxq->rx_tail];
948 while ((desc < rxq->nb_rx_desc) &&
949 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
950 ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
951 (1 << ICE_RX_DESC_STATUS_DD_S)) {
953 * Check the DD bit of one Rx descriptor in every group of 4,
954 * to avoid checking too frequently and degrading performance.
957 desc += ICE_RXQ_SCAN_INTERVAL;
958 rxdp += ICE_RXQ_SCAN_INTERVAL;
959 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
960 rxdp = &(rxq->rx_ring[rxq->rx_tail +
961 desc - rxq->nb_rx_desc]);
967 /* Translate the rx descriptor status to pkt flags */
968 static inline uint64_t
969 ice_rxd_status_to_pkt_flags(uint64_t qword)
973 /* Check if RSS_HASH */
974 flags = (((qword >> ICE_RX_DESC_STATUS_FLTSTAT_S) &
975 ICE_RX_DESC_FLTSTAT_RSS_HASH) ==
976 ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
981 /* Rx L3/L4 checksum */
982 static inline uint64_t
983 ice_rxd_error_to_pkt_flags(uint64_t qword)
986 uint64_t error_bits = (qword >> ICE_RXD_QW1_ERROR_S);
988 if (likely((error_bits & ICE_RX_ERR_BITS) == 0)) {
989 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
993 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_IPE_S)))
994 flags |= PKT_RX_IP_CKSUM_BAD;
996 flags |= PKT_RX_IP_CKSUM_GOOD;
998 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_L4E_S)))
999 flags |= PKT_RX_L4_CKSUM_BAD;
1001 flags |= PKT_RX_L4_CKSUM_GOOD;
1003 if (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_EIPE_S)))
1004 flags |= PKT_RX_EIP_CKSUM_BAD;
1010 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)
1012 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1013 (1 << ICE_RX_DESC_STATUS_L2TAG1P_S)) {
1014 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1016 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
1017 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1018 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
1023 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1024 if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
1025 (1 << ICE_RX_DESC_EXT_STATUS_L2TAG2P_S)) {
1026 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1027 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1028 mb->vlan_tci_outer = mb->vlan_tci;
1029 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
1030 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1031 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
1032 rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
1034 mb->vlan_tci_outer = 0;
1037 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1038 mb->vlan_tci, mb->vlan_tci_outer);
1041 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1042 #define ICE_LOOK_AHEAD 8
1043 #if (ICE_LOOK_AHEAD != 8)
1044 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1047 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1049 volatile union ice_rx_desc *rxdp;
1050 struct ice_rx_entry *rxep;
1051 struct rte_mbuf *mb;
1055 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1056 int32_t i, j, nb_rx = 0;
1057 uint64_t pkt_flags = 0;
1058 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1060 rxdp = &rxq->rx_ring[rxq->rx_tail];
1061 rxep = &rxq->sw_ring[rxq->rx_tail];
1063 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1064 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S;
1066 /* Make sure there is at least 1 packet to receive */
1067 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1071 * Scan LOOK_AHEAD descriptors at a time to determine which
1072 * descriptors reference packets that are ready to be received.
1074 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1075 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1076 /* Read desc statuses backwards to avoid race condition */
1077 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) {
1078 qword1 = rte_le_to_cpu_64(
1079 rxdp[j].wb.qword1.status_error_len);
1080 s[j] = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1081 ICE_RXD_QW1_STATUS_S;
1086 /* Compute how many status bits were set */
1087 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1088 nb_dd += s[j] & (1 << ICE_RX_DESC_STATUS_DD_S);
1092 /* Translate descriptor info to mbuf parameters */
1093 for (j = 0; j < nb_dd; j++) {
1095 qword1 = rte_le_to_cpu_64(
1096 rxdp[j].wb.qword1.status_error_len);
1097 pkt_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1098 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1099 mb->data_len = pkt_len;
1100 mb->pkt_len = pkt_len;
1102 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1103 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1104 if (pkt_flags & PKT_RX_RSS_HASH)
1107 rxdp[j].wb.qword0.hi_dword.rss);
1108 mb->packet_type = ptype_tbl[(uint8_t)(
1110 ICE_RXD_QW1_PTYPE_M) >>
1111 ICE_RXD_QW1_PTYPE_S)];
1112 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1114 mb->ol_flags |= pkt_flags;
1117 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1118 rxq->rx_stage[i + j] = rxep[j].mbuf;
1120 if (nb_dd != ICE_LOOK_AHEAD)
1124 /* Clear software ring entries */
1125 for (i = 0; i < nb_rx; i++)
1126 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1128 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1129 "port_id=%u, queue_id=%u, nb_rx=%d",
1130 rxq->port_id, rxq->queue_id, nb_rx);
1135 static inline uint16_t
1136 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1137 struct rte_mbuf **rx_pkts,
1141 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1143 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1145 for (i = 0; i < nb_pkts; i++)
1146 rx_pkts[i] = stage[i];
1148 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1149 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1155 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1157 volatile union ice_rx_desc *rxdp;
1158 struct ice_rx_entry *rxep;
1159 struct rte_mbuf *mb;
1160 uint16_t alloc_idx, i;
1164 /* Allocate buffers in bulk */
1165 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1166 (rxq->rx_free_thresh - 1));
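/* e.g. with rx_free_thresh = 32, the first refill starts at index 0 and
 * covers descriptors 0..31; rx_free_trigger then advances to 63.
 */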
1167 rxep = &rxq->sw_ring[alloc_idx];
1168 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1169 rxq->rx_free_thresh);
1170 if (unlikely(diag != 0)) {
1171 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1175 rxdp = &rxq->rx_ring[alloc_idx];
1176 for (i = 0; i < rxq->rx_free_thresh; i++) {
1177 if (likely(i < (rxq->rx_free_thresh - 1)))
1178 /* Prefetch next mbuf */
1179 rte_prefetch0(rxep[i + 1].mbuf);
1182 rte_mbuf_refcnt_set(mb, 1);
1184 mb->data_off = RTE_PKTMBUF_HEADROOM;
1186 mb->port = rxq->port_id;
1187 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1188 rxdp[i].read.hdr_addr = 0;
1189 rxdp[i].read.pkt_addr = dma_addr;
1192 /* Update Rx tail register */
1194 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1196 rxq->rx_free_trigger =
1197 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1198 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1199 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1204 static inline uint16_t
1205 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1207 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1209 struct rte_eth_dev *dev;
1214 if (rxq->rx_nb_avail)
1215 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1217 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1218 rxq->rx_next_avail = 0;
1219 rxq->rx_nb_avail = nb_rx;
1220 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1222 if (rxq->rx_tail > rxq->rx_free_trigger) {
1223 if (ice_rx_alloc_bufs(rxq) != 0) {
1226 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1227 dev->data->rx_mbuf_alloc_failed +=
1228 rxq->rx_free_thresh;
1229 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1230 "port_id=%u, queue_id=%u",
1231 rxq->port_id, rxq->queue_id);
1232 rxq->rx_nb_avail = 0;
1233 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1234 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1235 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1241 if (rxq->rx_tail >= rxq->nb_rx_desc)
1244 if (rxq->rx_nb_avail)
1245 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1251 ice_recv_pkts_bulk_alloc(void *rx_queue,
1252 struct rte_mbuf **rx_pkts,
1259 if (unlikely(nb_pkts == 0))
1262 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1263 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1266 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1267 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1268 nb_rx = (uint16_t)(nb_rx + count);
1269 nb_pkts = (uint16_t)(nb_pkts - count);
1278 ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
1279 struct rte_mbuf __rte_unused **rx_pkts,
1280 uint16_t __rte_unused nb_pkts)
1284 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
1287 ice_recv_scattered_pkts(void *rx_queue,
1288 struct rte_mbuf **rx_pkts,
1291 struct ice_rx_queue *rxq = rx_queue;
1292 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1293 volatile union ice_rx_desc *rxdp;
1294 union ice_rx_desc rxd;
1295 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1296 struct ice_rx_entry *rxe;
1297 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1298 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1299 struct rte_mbuf *nmb; /* new allocated mbuf */
1300 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1301 uint16_t rx_id = rxq->rx_tail;
1303 uint16_t nb_hold = 0;
1304 uint16_t rx_packet_len;
1308 uint64_t pkt_flags = 0;
1309 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1310 struct rte_eth_dev *dev;
1312 while (nb_rx < nb_pkts) {
1313 rxdp = &rx_ring[rx_id];
1314 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1315 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1316 ICE_RXD_QW1_STATUS_S;
1318 /* Check the DD bit first */
1319 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1323 nmb = rte_mbuf_raw_alloc(rxq->mp);
1324 if (unlikely(!nmb)) {
1325 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1326 dev->data->rx_mbuf_alloc_failed++;
1329 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1332 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1334 if (unlikely(rx_id == rxq->nb_rx_desc))
1337 /* Prefetch next mbuf */
1338 rte_prefetch0(sw_ring[rx_id].mbuf);
1341 * When the next RX descriptor is on a cache line boundary,
1342 * prefetch the next 4 RX descriptors and the next 8 pointers
1345 if ((rx_id & 0x3) == 0) {
1346 rte_prefetch0(&rx_ring[rx_id]);
1347 rte_prefetch0(&sw_ring[rx_id]);
1353 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1355 /* Set data buffer address and data length of the mbuf */
1356 rxdp->read.hdr_addr = 0;
1357 rxdp->read.pkt_addr = dma_addr;
1358 rx_packet_len = (qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1359 ICE_RXD_QW1_LEN_PBUF_S;
1360 rxm->data_len = rx_packet_len;
1361 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1362 ice_rxd_to_vlan_tci(rxm, rxdp);
1363 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1364 ICE_RXD_QW1_PTYPE_M) >>
1365 ICE_RXD_QW1_PTYPE_S)];
1368 * If this is the first buffer of the received packet, set the
1369 * pointer to the first mbuf of the packet and initialize its
1370 * context. Otherwise, update the total length and the number
1371 * of segments of the current scattered packet, and update the
1372 * pointer to the last mbuf of the current packet.
1376 first_seg->nb_segs = 1;
1377 first_seg->pkt_len = rx_packet_len;
1379 first_seg->pkt_len =
1380 (uint16_t)(first_seg->pkt_len +
1382 first_seg->nb_segs++;
1383 last_seg->next = rxm;
1387 * If this is not the last buffer of the received packet,
1388 * update the pointer to the last mbuf of the current scattered
1389 * packet and continue to parse the RX ring.
1391 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_EOF_S))) {
1397 * This is the last buffer of the received packet. If the CRC
1398 * is not stripped by the hardware:
1399 * - Subtract the CRC length from the total packet length.
1400 * - If the last buffer only contains the whole CRC or a part
1401 * of it, free the mbuf associated with the last buffer. If part
1402 * of the CRC is also contained in the previous mbuf, subtract
1403 * the length of that CRC part from the data length of the
1407 if (unlikely(rxq->crc_len > 0)) {
1408 first_seg->pkt_len -= ETHER_CRC_LEN;
1409 if (rx_packet_len <= ETHER_CRC_LEN) {
1410 rte_pktmbuf_free_seg(rxm);
1411 first_seg->nb_segs--;
1412 last_seg->data_len =
1413 (uint16_t)(last_seg->data_len -
1414 (ETHER_CRC_LEN - rx_packet_len));
1415 last_seg->next = NULL;
1417 rxm->data_len = (uint16_t)(rx_packet_len -
1421 first_seg->port = rxq->port_id;
1422 first_seg->ol_flags = 0;
1424 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1425 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1426 if (pkt_flags & PKT_RX_RSS_HASH)
1427 first_seg->hash.rss =
1428 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1430 first_seg->ol_flags |= pkt_flags;
1431 /* Prefetch data of first segment, if configured to do so. */
1432 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1433 first_seg->data_off));
1434 rx_pkts[nb_rx++] = first_seg;
1438 /* Record index of the next RX descriptor to probe. */
1439 rxq->rx_tail = rx_id;
1440 rxq->pkt_first_seg = first_seg;
1441 rxq->pkt_last_seg = last_seg;
1444 * If the number of free RX descriptors is greater than the RX free
1445 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1446 * register. Update the RDT with the value of the last processed RX
1447 * descriptor minus 1, to guarantee that the RDT register is never
1448 * equal to the RDH register, which creates a "full" ring situation
1449 * from the hardware point of view.
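 * For example, with rx_free_thresh = 32 the tail is only written back once
 * more than 32 descriptors have been processed.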
1451 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1452 if (nb_hold > rxq->rx_free_thresh) {
1453 rx_id = (uint16_t)(rx_id == 0 ?
1454 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1455 /* write TAIL register */
1456 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1459 rxq->nb_rx_hold = nb_hold;
1461 /* Return the number of packets received in the burst */
1466 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1468 static const uint32_t ptypes[] = {
1469 /* refers to ice_get_default_pkt_type() */
1471 RTE_PTYPE_L2_ETHER_LLDP,
1472 RTE_PTYPE_L2_ETHER_ARP,
1473 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1474 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1477 RTE_PTYPE_L4_NONFRAG,
1481 RTE_PTYPE_TUNNEL_GRENAT,
1482 RTE_PTYPE_TUNNEL_IP,
1483 RTE_PTYPE_INNER_L2_ETHER,
1484 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1485 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1486 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1487 RTE_PTYPE_INNER_L4_FRAG,
1488 RTE_PTYPE_INNER_L4_ICMP,
1489 RTE_PTYPE_INNER_L4_NONFRAG,
1490 RTE_PTYPE_INNER_L4_SCTP,
1491 RTE_PTYPE_INNER_L4_TCP,
1492 RTE_PTYPE_INNER_L4_UDP,
1493 RTE_PTYPE_TUNNEL_GTPC,
1494 RTE_PTYPE_TUNNEL_GTPU,
1498 if (dev->rx_pkt_burst == ice_recv_pkts ||
1499 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1500 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1502 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1508 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1510 struct ice_rx_queue *rxq = rx_queue;
1511 volatile uint64_t *status;
1515 if (unlikely(offset >= rxq->nb_rx_desc))
1518 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1519 return RTE_ETH_RX_DESC_UNAVAIL;
1521 desc = rxq->rx_tail + offset;
1522 if (desc >= rxq->nb_rx_desc)
1523 desc -= rxq->nb_rx_desc;
1525 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
1526 mask = rte_cpu_to_le_64((1ULL << ICE_RX_DESC_STATUS_DD_S) <<
1527 ICE_RXD_QW1_STATUS_S);
1529 return RTE_ETH_RX_DESC_DONE;
1531 return RTE_ETH_RX_DESC_AVAIL;
1535 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1537 struct ice_tx_queue *txq = tx_queue;
1538 volatile uint64_t *status;
1539 uint64_t mask, expect;
1542 if (unlikely(offset >= txq->nb_tx_desc))
1545 desc = txq->tx_tail + offset;
1546 /* go to next desc that has the RS bit */
1547 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1549 if (desc >= txq->nb_tx_desc) {
1550 desc -= txq->nb_tx_desc;
1551 if (desc >= txq->nb_tx_desc)
1552 desc -= txq->nb_tx_desc;
1555 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1556 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1557 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1558 ICE_TXD_QW1_DTYPE_S);
1559 if ((*status & mask) == expect)
1560 return RTE_ETH_TX_DESC_DONE;
1562 return RTE_ETH_TX_DESC_FULL;
1566 ice_clear_queues(struct rte_eth_dev *dev)
1570 PMD_INIT_FUNC_TRACE();
1572 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1573 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
1574 ice_reset_tx_queue(dev->data->tx_queues[i]);
1577 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1578 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
1579 ice_reset_rx_queue(dev->data->rx_queues[i]);
1584 ice_free_queues(struct rte_eth_dev *dev)
1588 PMD_INIT_FUNC_TRACE();
1590 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1591 if (!dev->data->rx_queues[i])
1593 ice_rx_queue_release(dev->data->rx_queues[i]);
1594 dev->data->rx_queues[i] = NULL;
1596 dev->data->nb_rx_queues = 0;
1598 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1599 if (!dev->data->tx_queues[i])
1601 ice_tx_queue_release(dev->data->tx_queues[i]);
1602 dev->data->tx_queues[i] = NULL;
1604 dev->data->nb_tx_queues = 0;
1608 ice_recv_pkts(void *rx_queue,
1609 struct rte_mbuf **rx_pkts,
1612 struct ice_rx_queue *rxq = rx_queue;
1613 volatile union ice_rx_desc *rx_ring = rxq->rx_ring;
1614 volatile union ice_rx_desc *rxdp;
1615 union ice_rx_desc rxd;
1616 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1617 struct ice_rx_entry *rxe;
1618 struct rte_mbuf *nmb; /* new allocated mbuf */
1619 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1620 uint16_t rx_id = rxq->rx_tail;
1622 uint16_t nb_hold = 0;
1623 uint16_t rx_packet_len;
1627 uint64_t pkt_flags = 0;
1628 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1629 struct rte_eth_dev *dev;
1631 while (nb_rx < nb_pkts) {
1632 rxdp = &rx_ring[rx_id];
1633 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1634 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>
1635 ICE_RXD_QW1_STATUS_S;
1637 /* Check the DD bit first */
1638 if (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))
1642 nmb = rte_mbuf_raw_alloc(rxq->mp);
1643 if (unlikely(!nmb)) {
1644 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1645 dev->data->rx_mbuf_alloc_failed++;
1648 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1651 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1653 if (unlikely(rx_id == rxq->nb_rx_desc))
1658 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1661 * Fill the read format of the descriptor with the physical address
1662 * of the newly allocated mbuf (nmb).
1664 rxdp->read.hdr_addr = 0;
1665 rxdp->read.pkt_addr = dma_addr;
1667 /* calculate rx_packet_len of the received pkt */
1668 rx_packet_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>
1669 ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;
1671 /* fill old mbuf with received descriptor: rxd */
1672 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1673 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1676 rxm->pkt_len = rx_packet_len;
1677 rxm->data_len = rx_packet_len;
1678 rxm->port = rxq->port_id;
1679 ice_rxd_to_vlan_tci(rxm, rxdp);
1680 rxm->packet_type = ptype_tbl[(uint8_t)((qword1 &
1681 ICE_RXD_QW1_PTYPE_M) >>
1682 ICE_RXD_QW1_PTYPE_S)];
1683 pkt_flags = ice_rxd_status_to_pkt_flags(qword1);
1684 pkt_flags |= ice_rxd_error_to_pkt_flags(qword1);
1685 if (pkt_flags & PKT_RX_RSS_HASH)
1687 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1688 rxm->ol_flags |= pkt_flags;
1689 /* copy old mbuf to rx_pkts */
1690 rx_pkts[nb_rx++] = rxm;
1692 rxq->rx_tail = rx_id;
1694 * If the number of free RX descriptors is greater than the RX free
1695 * threshold of the queue, advance the receive tail register of the queue.
1696 * Update that register with the value of the last processed RX
1697 * descriptor minus 1.
1699 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1700 if (nb_hold > rxq->rx_free_thresh) {
1701 rx_id = (uint16_t)(rx_id == 0 ?
1702 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1703 /* write TAIL register */
1704 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1707 rxq->nb_rx_hold = nb_hold;
1709 /* Return the number of packets received in the burst */
1714 ice_txd_enable_checksum(uint64_t ol_flags,
1716 uint32_t *td_offset,
1717 union ice_tx_offload tx_offload)
1719 /* L2 length must be set. */
1720 *td_offset |= (tx_offload.l2_len >> 1) <<
1721 ICE_TX_DESC_LEN_MACLEN_S;
1723 /* Enable L3 checksum offloads */
1724 if (ol_flags & PKT_TX_IP_CKSUM) {
1725 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1726 *td_offset |= (tx_offload.l3_len >> 2) <<
1727 ICE_TX_DESC_LEN_IPLEN_S;
1728 } else if (ol_flags & PKT_TX_IPV4) {
1729 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1730 *td_offset |= (tx_offload.l3_len >> 2) <<
1731 ICE_TX_DESC_LEN_IPLEN_S;
1732 } else if (ol_flags & PKT_TX_IPV6) {
1733 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1734 *td_offset |= (tx_offload.l3_len >> 2) <<
1735 ICE_TX_DESC_LEN_IPLEN_S;
1738 if (ol_flags & PKT_TX_TCP_SEG) {
1739 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1740 *td_offset |= (tx_offload.l4_len >> 2) <<
1741 ICE_TX_DESC_LEN_L4_LEN_S;
1745 /* Enable L4 checksum offloads */
1746 switch (ol_flags & PKT_TX_L4_MASK) {
1747 case PKT_TX_TCP_CKSUM:
1748 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1749 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
1750 ICE_TX_DESC_LEN_L4_LEN_S;
1752 case PKT_TX_SCTP_CKSUM:
1753 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1754 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
1755 ICE_TX_DESC_LEN_L4_LEN_S;
1757 case PKT_TX_UDP_CKSUM:
1758 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1759 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
1760 ICE_TX_DESC_LEN_L4_LEN_S;
1768 ice_xmit_cleanup(struct ice_tx_queue *txq)
1770 struct ice_tx_entry *sw_ring = txq->sw_ring;
1771 volatile struct ice_tx_desc *txd = txq->tx_ring;
1772 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1773 uint16_t nb_tx_desc = txq->nb_tx_desc;
1774 uint16_t desc_to_clean_to;
1775 uint16_t nb_tx_to_clean;
1777 /* Determine the last descriptor needing to be cleaned */
1778 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
1779 if (desc_to_clean_to >= nb_tx_desc)
1780 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1782 /* Check to make sure the last descriptor to clean is done */
1783 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1784 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
1785 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
1786 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1787 "(port=%d queue=%d) value=0x%"PRIx64"\n",
1789 txq->port_id, txq->queue_id,
1790 txd[desc_to_clean_to].cmd_type_offset_bsz);
1791 /* Failed to clean any descriptors */
1795 /* Figure out how many descriptors will be cleaned */
1796 if (last_desc_cleaned > desc_to_clean_to)
1797 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1800 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1803 /* The last descriptor to clean is done, so that means all the
1804 * descriptors from the last descriptor that was cleaned
1805 * up to the last descriptor with the RS bit set
1806 * are done. Only reset the threshold descriptor.
1808 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1810 /* Update the txq to reflect the last descriptor that was cleaned */
1811 txq->last_desc_cleaned = desc_to_clean_to;
1812 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
1817 /* Construct the tx flags */
1818 static inline uint64_t
1819 ice_build_ctob(uint32_t td_cmd,
1824 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
1825 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
1826 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
1827 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1828 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
1831 /* Check if the context descriptor is needed for TX offloading */
1832 static inline uint16_t
1833 ice_calc_context_desc(uint64_t flags)
1835 static uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_QINQ;
1837 return (flags & mask) ? 1 : 0;
1840 /* set ice TSO context descriptor */
1841 static inline uint64_t
1842 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
1844 uint64_t ctx_desc = 0;
1845 uint32_t cd_cmd, hdr_len, cd_tso_len;
1847 if (!tx_offload.l4_len) {
1848 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1853 * In case of a non-tunneling packet, the outer_l2_len and
1854 * outer_l3_len must be 0.
1856 hdr_len = tx_offload.outer_l2_len +
1857 tx_offload.outer_l3_len +
1862 cd_cmd = ICE_TX_CTX_DESC_TSO;
1863 cd_tso_len = mbuf->pkt_len - hdr_len;
1864 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
1865 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1866 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
1872 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1874 struct ice_tx_queue *txq;
1875 volatile struct ice_tx_desc *tx_ring;
1876 volatile struct ice_tx_desc *txd;
1877 struct ice_tx_entry *sw_ring;
1878 struct ice_tx_entry *txe, *txn;
1879 struct rte_mbuf *tx_pkt;
1880 struct rte_mbuf *m_seg;
1885 uint32_t td_cmd = 0;
1886 uint32_t td_offset = 0;
1887 uint32_t td_tag = 0;
1889 uint64_t buf_dma_addr;
1891 union ice_tx_offload tx_offload = {0};
1894 sw_ring = txq->sw_ring;
1895 tx_ring = txq->tx_ring;
1896 tx_id = txq->tx_tail;
1897 txe = &sw_ring[tx_id];
1899 /* Check if the descriptor ring needs to be cleaned. */
1900 if (txq->nb_tx_free < txq->tx_free_thresh)
1901 ice_xmit_cleanup(txq);
1903 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1904 tx_pkt = *tx_pkts++;
1907 ol_flags = tx_pkt->ol_flags;
1908 tx_offload.l2_len = tx_pkt->l2_len;
1909 tx_offload.l3_len = tx_pkt->l3_len;
1910 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1911 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1912 tx_offload.l4_len = tx_pkt->l4_len;
1913 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1914 /* Calculate the number of context descriptors needed. */
1915 nb_ctx = ice_calc_context_desc(ol_flags);
1917 /* The number of descriptors that must be allocated for
1918 * a packet equals the number of segments of that packet
1919 * plus one context descriptor if needed.
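 * For example, a two-segment TSO packet needs 2 + 1 = 3 descriptors.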
1921 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1922 tx_last = (uint16_t)(tx_id + nb_used - 1);
1925 if (tx_last >= txq->nb_tx_desc)
1926 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1928 if (nb_used > txq->nb_tx_free) {
1929 if (ice_xmit_cleanup(txq) != 0) {
1934 if (unlikely(nb_used > txq->tx_rs_thresh)) {
1935 while (nb_used > txq->nb_tx_free) {
1936 if (ice_xmit_cleanup(txq) != 0) {
1945 /* Descriptor based VLAN insertion */
1946 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
1947 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
1948 td_tag = tx_pkt->vlan_tci;
1951 /* Enable checksum offloading */
1952 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
1953 ice_txd_enable_checksum(ol_flags, &td_cmd,
1954 &td_offset, tx_offload);
1958 /* Setup TX context descriptor if required */
1959 volatile struct ice_tx_ctx_desc *ctx_txd =
1960 (volatile struct ice_tx_ctx_desc *)
1962 uint16_t cd_l2tag2 = 0;
1963 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
1965 txn = &sw_ring[txe->next_id];
1966 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1968 rte_pktmbuf_free_seg(txe->mbuf);
1972 if (ol_flags & PKT_TX_TCP_SEG)
1973 cd_type_cmd_tso_mss |=
1974 ice_set_tso_ctx(tx_pkt, tx_offload);
1976 /* TX context descriptor based double VLAN insert */
1977 if (ol_flags & PKT_TX_QINQ) {
1978 cd_l2tag2 = tx_pkt->vlan_tci_outer;
1979 cd_type_cmd_tso_mss |=
1980 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
1981 ICE_TXD_CTX_QW1_CMD_S);
1983 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
1985 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1987 txe->last_id = tx_last;
1988 tx_id = txe->next_id;
1994 txd = &tx_ring[tx_id];
1995 txn = &sw_ring[txe->next_id];
1998 rte_pktmbuf_free_seg(txe->mbuf);
2001 /* Setup TX Descriptor */
2002 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2003 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2004 txd->cmd_type_offset_bsz =
2005 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2006 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2007 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2008 ((uint64_t)m_seg->data_len <<
2009 ICE_TXD_QW1_TX_BUF_SZ_S) |
2010 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2012 txe->last_id = tx_last;
2013 tx_id = txe->next_id;
2015 m_seg = m_seg->next;
2018 /* fill the last descriptor with End of Packet (EOP) bit */
2019 td_cmd |= ICE_TX_DESC_CMD_EOP;
2020 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2021 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2023 /* set RS bit on the last descriptor of one packet */
2024 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2025 PMD_TX_FREE_LOG(DEBUG,
2026 "Setting RS bit on TXD id="
2027 "%4u (port=%d queue=%d)",
2028 tx_last, txq->port_id, txq->queue_id);
2030 td_cmd |= ICE_TX_DESC_CMD_RS;
2032 /* Update txq RS bit counters */
2033 txq->nb_tx_used = 0;
2035 txd->cmd_type_offset_bsz |=
2036 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2042 /* update Tail register */
2043 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2044 txq->tx_tail = tx_id;
2049 static inline int __attribute__((always_inline))
2050 ice_tx_free_bufs(struct ice_tx_queue *txq)
2052 struct ice_tx_entry *txep;
2055 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2056 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2057 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2060 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2062 for (i = 0; i < txq->tx_rs_thresh; i++)
2063 rte_prefetch0((txep + i)->mbuf);
2065 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2066 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2067 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2071 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2072 rte_pktmbuf_free_seg(txep->mbuf);
2077 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2078 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2079 if (txq->tx_next_dd >= txq->nb_tx_desc)
2080 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2082 return txq->tx_rs_thresh;
2085 /* Populate 4 descriptors with data from 4 mbufs */
2087 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2092 for (i = 0; i < 4; i++, txdp++, pkts++) {
2093 dma_addr = rte_mbuf_data_iova(*pkts);
2094 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2095 txdp->cmd_type_offset_bsz =
2096 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2097 (*pkts)->data_len, 0);
2101 /* Populate 1 descriptor with data from 1 mbuf */
2103 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2107 dma_addr = rte_mbuf_data_iova(*pkts);
2108 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2109 txdp->cmd_type_offset_bsz =
2110 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2111 (*pkts)->data_len, 0);
2115 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2118 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2119 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2120 const int N_PER_LOOP = 4;
2121 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2122 int mainpart, leftover;
2126 * Process most of the packets in chunks of N pkts. Any
2127 * leftover packets will get processed one at a time.
2129 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2130 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
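/* e.g. nb_pkts = 13 gives mainpart = 12 (three chunks of 4) and leftover = 1 */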
2131 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2132 /* Copy N mbuf pointers to the S/W ring */
2133 for (j = 0; j < N_PER_LOOP; ++j)
2134 (txep + i + j)->mbuf = *(pkts + i + j);
2135 tx4(txdp + i, pkts + i);
2138 if (unlikely(leftover > 0)) {
2139 for (i = 0; i < leftover; ++i) {
2140 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2141 tx1(txdp + mainpart + i, pkts + mainpart + i);
2146 static inline uint16_t
2147 tx_xmit_pkts(struct ice_tx_queue *txq,
2148 struct rte_mbuf **tx_pkts,
2151 volatile struct ice_tx_desc *txr = txq->tx_ring;
2155 * Begin scanning the H/W ring for done descriptors when the number
2156 * of available descriptors drops below tx_free_thresh. For each done
2157 * descriptor, free the associated buffer.
2159 if (txq->nb_tx_free < txq->tx_free_thresh)
2160 ice_tx_free_bufs(txq);
2162 /* Use available descriptor only */
2163 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2164 if (unlikely(!nb_pkts))
2167 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2168 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2169 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2170 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2171 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2172 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2174 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2178 /* Fill hardware descriptor ring with mbuf data */
2179 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2180 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2182 /* Determine if the RS bit needs to be set */
2183 if (txq->tx_tail > txq->tx_next_rs) {
2184 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2185 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2188 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2189 if (txq->tx_next_rs >= txq->nb_tx_desc)
2190 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2193 if (txq->tx_tail >= txq->nb_tx_desc)
2196 /* Update the tx tail register */
2198 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2204 ice_xmit_pkts_simple(void *tx_queue,
2205 struct rte_mbuf **tx_pkts,
2210 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2211 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2215 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2218 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2219 &tx_pkts[nb_tx], num);
2220 nb_tx = (uint16_t)(nb_tx + ret);
2221 nb_pkts = (uint16_t)(nb_pkts - ret);
2229 void __attribute__((cold))
2230 ice_set_rx_function(struct rte_eth_dev *dev)
2232 PMD_INIT_FUNC_TRACE();
2233 struct ice_adapter *ad =
2234 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2236 if (dev->data->scattered_rx) {
2237 /* Set the non-LRO scattered function */
2239 "Using a Scattered function on port %d.",
2240 dev->data->port_id);
2241 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2242 } else if (ad->rx_bulk_alloc_allowed) {
2244 "Rx Burst Bulk Alloc Preconditions are "
2245 "satisfied. Rx Burst Bulk Alloc function "
2246 "will be used on port %d.",
2247 dev->data->port_id);
2248 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2251 "Rx Burst Bulk Alloc Preconditions are not "
2252 "satisfied, Normal Rx will be used on port %d.",
2253 dev->data->port_id);
2254 dev->rx_pkt_burst = ice_recv_pkts;
2258 /*********************************************************************
2262 **********************************************************************/
2263 /* Limits on the TSO MSS and the total TSO frame size */
2264 #define ICE_MIN_TSO_MSS 64
2265 #define ICE_MAX_TSO_MSS 9728
2266 #define ICE_MAX_TSO_FRAME_SIZE 262144
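/* Illustrative values: a TSO mbuf with tso_segsz == 1448 and
 * pkt_len == 64000 passes the checks in ice_prep_pkts() below, while
 * tso_segsz == 32 or a frame larger than 256 KB (ICE_MAX_TSO_FRAME_SIZE)
 * is rejected with rte_errno set.
 */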
2268 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2275 for (i = 0; i < nb_pkts; i++) {
2277 ol_flags = m->ol_flags;
2279 if (ol_flags & PKT_TX_TCP_SEG &&
2280 (m->tso_segsz < ICE_MIN_TSO_MSS ||
2281 m->tso_segsz > ICE_MAX_TSO_MSS ||
2282 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
2284 * An MSS outside this range is considered malicious
2286 rte_errno = EINVAL;
2290 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2291 ret = rte_validate_tx_offload(m);
2297 ret = rte_net_intel_cksum_prepare(m);
2306 void __attribute__((cold))
2307 ice_set_tx_function(struct rte_eth_dev *dev)
2309 struct ice_adapter *ad =
2310 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2312 if (ad->tx_simple_allowed) {
2313 PMD_INIT_LOG(DEBUG, "Simple Tx will be used.");
2314 dev->tx_pkt_burst = ice_xmit_pkts_simple;
2315 dev->tx_pkt_prepare = NULL;
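/* tx_simple_allowed is assumed to be determined at Tx queue setup (no
 * offload flags and a sufficiently large tx_rs_thresh); the simple path
 * therefore needs no tx_pkt_prepare stage.
 */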
2317 PMD_INIT_LOG(DEBUG, "Normal Tx will be used.");
2318 dev->tx_pkt_burst = ice_xmit_pkts;
2319 dev->tx_pkt_prepare = ice_prep_pkts;
2323 /* The hardware datasheet gives the detailed meaning of each packet type value.
2325 * @note: fix ice_dev_supported_ptypes_get() if any change here.
2327 static inline uint32_t
2328 ice_get_default_pkt_type(uint16_t ptype)
2330 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
2331 __rte_cache_aligned = {
2334 [1] = RTE_PTYPE_L2_ETHER,
2335 /* [2] - [5] reserved */
2336 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2337 /* [7] - [10] reserved */
2338 [11] = RTE_PTYPE_L2_ETHER_ARP,
2339 /* [12] - [21] reserved */
2341 /* Non-tunneled IPv4 */
2342 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2344 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2345 RTE_PTYPE_L4_NONFRAG,
2346 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2349 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2351 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2353 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2357 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2358 RTE_PTYPE_TUNNEL_IP |
2359 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2360 RTE_PTYPE_INNER_L4_FRAG,
2361 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2362 RTE_PTYPE_TUNNEL_IP |
2363 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2364 RTE_PTYPE_INNER_L4_NONFRAG,
2365 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2366 RTE_PTYPE_TUNNEL_IP |
2367 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2368 RTE_PTYPE_INNER_L4_UDP,
2370 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2371 RTE_PTYPE_TUNNEL_IP |
2372 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2373 RTE_PTYPE_INNER_L4_TCP,
2374 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2375 RTE_PTYPE_TUNNEL_IP |
2376 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2377 RTE_PTYPE_INNER_L4_SCTP,
2378 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2379 RTE_PTYPE_TUNNEL_IP |
2380 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2381 RTE_PTYPE_INNER_L4_ICMP,
2384 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2385 RTE_PTYPE_TUNNEL_IP |
2386 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2387 RTE_PTYPE_INNER_L4_FRAG,
2388 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2389 RTE_PTYPE_TUNNEL_IP |
2390 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2391 RTE_PTYPE_INNER_L4_NONFRAG,
2392 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2393 RTE_PTYPE_TUNNEL_IP |
2394 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2395 RTE_PTYPE_INNER_L4_UDP,
2397 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2398 RTE_PTYPE_TUNNEL_IP |
2399 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2400 RTE_PTYPE_INNER_L4_TCP,
2401 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2402 RTE_PTYPE_TUNNEL_IP |
2403 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2404 RTE_PTYPE_INNER_L4_SCTP,
2405 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2406 RTE_PTYPE_TUNNEL_IP |
2407 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2408 RTE_PTYPE_INNER_L4_ICMP,
2410 /* IPv4 --> GRE/Teredo/VXLAN */
2411 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2412 RTE_PTYPE_TUNNEL_GRENAT,
2414 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2415 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2416 RTE_PTYPE_TUNNEL_GRENAT |
2417 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2418 RTE_PTYPE_INNER_L4_FRAG,
2419 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2420 RTE_PTYPE_TUNNEL_GRENAT |
2421 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2422 RTE_PTYPE_INNER_L4_NONFRAG,
2423 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2424 RTE_PTYPE_TUNNEL_GRENAT |
2425 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2426 RTE_PTYPE_INNER_L4_UDP,
2428 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2429 RTE_PTYPE_TUNNEL_GRENAT |
2430 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2431 RTE_PTYPE_INNER_L4_TCP,
2432 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2433 RTE_PTYPE_TUNNEL_GRENAT |
2434 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2435 RTE_PTYPE_INNER_L4_SCTP,
2436 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2437 RTE_PTYPE_TUNNEL_GRENAT |
2438 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2439 RTE_PTYPE_INNER_L4_ICMP,
2441 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2442 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2443 RTE_PTYPE_TUNNEL_GRENAT |
2444 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2445 RTE_PTYPE_INNER_L4_FRAG,
2446 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2447 RTE_PTYPE_TUNNEL_GRENAT |
2448 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2449 RTE_PTYPE_INNER_L4_NONFRAG,
2450 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2451 RTE_PTYPE_TUNNEL_GRENAT |
2452 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2453 RTE_PTYPE_INNER_L4_UDP,
2455 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2456 RTE_PTYPE_TUNNEL_GRENAT |
2457 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2458 RTE_PTYPE_INNER_L4_TCP,
2459 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2460 RTE_PTYPE_TUNNEL_GRENAT |
2461 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2462 RTE_PTYPE_INNER_L4_SCTP,
2463 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2464 RTE_PTYPE_TUNNEL_GRENAT |
2465 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2466 RTE_PTYPE_INNER_L4_ICMP,
2468 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2469 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2470 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2472 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2473 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2474 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2475 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2476 RTE_PTYPE_INNER_L4_FRAG,
2477 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2478 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2479 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2480 RTE_PTYPE_INNER_L4_NONFRAG,
2481 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2482 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2483 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2484 RTE_PTYPE_INNER_L4_UDP,
2486 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2487 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2488 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2489 RTE_PTYPE_INNER_L4_TCP,
2490 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2491 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2492 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2493 RTE_PTYPE_INNER_L4_SCTP,
2494 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2495 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2496 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2497 RTE_PTYPE_INNER_L4_ICMP,
2499 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2500 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2501 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2502 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2503 RTE_PTYPE_INNER_L4_FRAG,
2504 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2505 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2506 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2507 RTE_PTYPE_INNER_L4_NONFRAG,
2508 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2509 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2510 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2511 RTE_PTYPE_INNER_L4_UDP,
2513 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2514 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2515 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2516 RTE_PTYPE_INNER_L4_TCP,
2517 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2518 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2519 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2520 RTE_PTYPE_INNER_L4_SCTP,
2521 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2522 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2523 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2524 RTE_PTYPE_INNER_L4_ICMP,
2526 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2527 [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2528 RTE_PTYPE_TUNNEL_GRENAT |
2529 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2531 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2532 [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2533 RTE_PTYPE_TUNNEL_GRENAT |
2534 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2535 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2536 RTE_PTYPE_INNER_L4_FRAG,
2537 [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2538 RTE_PTYPE_TUNNEL_GRENAT |
2539 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2540 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2541 RTE_PTYPE_INNER_L4_NONFRAG,
2542 [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2543 RTE_PTYPE_TUNNEL_GRENAT |
2544 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2545 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2546 RTE_PTYPE_INNER_L4_UDP,
2548 [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2549 RTE_PTYPE_TUNNEL_GRENAT |
2550 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2551 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2552 RTE_PTYPE_INNER_L4_TCP,
2553 [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2554 RTE_PTYPE_TUNNEL_GRENAT |
2555 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2556 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2557 RTE_PTYPE_INNER_L4_SCTP,
2558 [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2559 RTE_PTYPE_TUNNEL_GRENAT |
2560 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2561 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2562 RTE_PTYPE_INNER_L4_ICMP,
2564 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2565 [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2566 RTE_PTYPE_TUNNEL_GRENAT |
2567 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2568 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2569 RTE_PTYPE_INNER_L4_FRAG,
2570 [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2571 RTE_PTYPE_TUNNEL_GRENAT |
2572 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2573 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2574 RTE_PTYPE_INNER_L4_NONFRAG,
2575 [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2576 RTE_PTYPE_TUNNEL_GRENAT |
2577 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2578 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2579 RTE_PTYPE_INNER_L4_UDP,
2581 [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2582 RTE_PTYPE_TUNNEL_GRENAT |
2583 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2584 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2585 RTE_PTYPE_INNER_L4_TCP,
2586 [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2587 RTE_PTYPE_TUNNEL_GRENAT |
2588 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2589 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2590 RTE_PTYPE_INNER_L4_SCTP,
2591 [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2592 RTE_PTYPE_TUNNEL_GRENAT |
2593 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2594 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2595 RTE_PTYPE_INNER_L4_ICMP,
2597 /* Non-tunneled IPv6 */
2598 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2600 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2601 RTE_PTYPE_L4_NONFRAG,
2602 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2605 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2607 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2609 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2613 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2614 RTE_PTYPE_TUNNEL_IP |
2615 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2616 RTE_PTYPE_INNER_L4_FRAG,
2617 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2618 RTE_PTYPE_TUNNEL_IP |
2619 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2620 RTE_PTYPE_INNER_L4_NONFRAG,
2621 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2622 RTE_PTYPE_TUNNEL_IP |
2623 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2624 RTE_PTYPE_INNER_L4_UDP,
2626 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2627 RTE_PTYPE_TUNNEL_IP |
2628 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2629 RTE_PTYPE_INNER_L4_TCP,
2630 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2631 RTE_PTYPE_TUNNEL_IP |
2632 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2633 RTE_PTYPE_INNER_L4_SCTP,
2634 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2635 RTE_PTYPE_TUNNEL_IP |
2636 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2637 RTE_PTYPE_INNER_L4_ICMP,
2640 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2641 RTE_PTYPE_TUNNEL_IP |
2642 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2643 RTE_PTYPE_INNER_L4_FRAG,
2644 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2645 RTE_PTYPE_TUNNEL_IP |
2646 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2647 RTE_PTYPE_INNER_L4_NONFRAG,
2648 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2649 RTE_PTYPE_TUNNEL_IP |
2650 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2651 RTE_PTYPE_INNER_L4_UDP,
2652 /* [105] reserved */
2653 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2654 RTE_PTYPE_TUNNEL_IP |
2655 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2656 RTE_PTYPE_INNER_L4_TCP,
2657 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2658 RTE_PTYPE_TUNNEL_IP |
2659 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2660 RTE_PTYPE_INNER_L4_SCTP,
2661 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2662 RTE_PTYPE_TUNNEL_IP |
2663 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2664 RTE_PTYPE_INNER_L4_ICMP,
2666 /* IPv6 --> GRE/Teredo/VXLAN */
2667 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2668 RTE_PTYPE_TUNNEL_GRENAT,
2670 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2671 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2672 RTE_PTYPE_TUNNEL_GRENAT |
2673 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2674 RTE_PTYPE_INNER_L4_FRAG,
2675 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2676 RTE_PTYPE_TUNNEL_GRENAT |
2677 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2678 RTE_PTYPE_INNER_L4_NONFRAG,
2679 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2680 RTE_PTYPE_TUNNEL_GRENAT |
2681 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2682 RTE_PTYPE_INNER_L4_UDP,
2683 /* [113] reserved */
2684 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2685 RTE_PTYPE_TUNNEL_GRENAT |
2686 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2687 RTE_PTYPE_INNER_L4_TCP,
2688 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2689 RTE_PTYPE_TUNNEL_GRENAT |
2690 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2691 RTE_PTYPE_INNER_L4_SCTP,
2692 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2693 RTE_PTYPE_TUNNEL_GRENAT |
2694 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2695 RTE_PTYPE_INNER_L4_ICMP,
2697 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2698 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2699 RTE_PTYPE_TUNNEL_GRENAT |
2700 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2701 RTE_PTYPE_INNER_L4_FRAG,
2702 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2703 RTE_PTYPE_TUNNEL_GRENAT |
2704 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2705 RTE_PTYPE_INNER_L4_NONFRAG,
2706 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2707 RTE_PTYPE_TUNNEL_GRENAT |
2708 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2709 RTE_PTYPE_INNER_L4_UDP,
2710 /* [120] reserved */
2711 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2712 RTE_PTYPE_TUNNEL_GRENAT |
2713 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2714 RTE_PTYPE_INNER_L4_TCP,
2715 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2716 RTE_PTYPE_TUNNEL_GRENAT |
2717 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2718 RTE_PTYPE_INNER_L4_SCTP,
2719 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2720 RTE_PTYPE_TUNNEL_GRENAT |
2721 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2722 RTE_PTYPE_INNER_L4_ICMP,
2724 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2725 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2726 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2728 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2729 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2730 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2731 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2732 RTE_PTYPE_INNER_L4_FRAG,
2733 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2734 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2735 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2736 RTE_PTYPE_INNER_L4_NONFRAG,
2737 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2738 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2739 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2740 RTE_PTYPE_INNER_L4_UDP,
2741 /* [128] reserved */
2742 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2743 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2744 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2745 RTE_PTYPE_INNER_L4_TCP,
2746 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2747 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2748 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2749 RTE_PTYPE_INNER_L4_SCTP,
2750 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2751 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2752 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2753 RTE_PTYPE_INNER_L4_ICMP,
2755 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2756 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2757 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2758 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2759 RTE_PTYPE_INNER_L4_FRAG,
2760 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2761 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2762 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2763 RTE_PTYPE_INNER_L4_NONFRAG,
2764 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2765 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2766 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2767 RTE_PTYPE_INNER_L4_UDP,
2768 /* [135] reserved */
2769 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2770 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2771 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2772 RTE_PTYPE_INNER_L4_TCP,
2773 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2774 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2775 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2776 RTE_PTYPE_INNER_L4_SCTP,
2777 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2778 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2779 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2780 RTE_PTYPE_INNER_L4_ICMP,
2782 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
2783 [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2784 RTE_PTYPE_TUNNEL_GRENAT |
2785 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2787 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
2788 [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2789 RTE_PTYPE_TUNNEL_GRENAT |
2790 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2791 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2792 RTE_PTYPE_INNER_L4_FRAG,
2793 [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2794 RTE_PTYPE_TUNNEL_GRENAT |
2795 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2796 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2797 RTE_PTYPE_INNER_L4_NONFRAG,
2798 [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2799 RTE_PTYPE_TUNNEL_GRENAT |
2800 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2801 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2802 RTE_PTYPE_INNER_L4_UDP,
2803 /* [143] reserved */
2804 [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2805 RTE_PTYPE_TUNNEL_GRENAT |
2806 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2807 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2808 RTE_PTYPE_INNER_L4_TCP,
2809 [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2810 RTE_PTYPE_TUNNEL_GRENAT |
2811 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2812 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2813 RTE_PTYPE_INNER_L4_SCTP,
2814 [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2815 RTE_PTYPE_TUNNEL_GRENAT |
2816 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2817 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2818 RTE_PTYPE_INNER_L4_ICMP,
2820 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
2821 [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2822 RTE_PTYPE_TUNNEL_GRENAT |
2823 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2824 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2825 RTE_PTYPE_INNER_L4_FRAG,
2826 [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2827 RTE_PTYPE_TUNNEL_GRENAT |
2828 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2829 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2830 RTE_PTYPE_INNER_L4_NONFRAG,
2831 [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2832 RTE_PTYPE_TUNNEL_GRENAT |
2833 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2834 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2835 RTE_PTYPE_INNER_L4_UDP,
2836 /* [150] reserved */
2837 [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2838 RTE_PTYPE_TUNNEL_GRENAT |
2839 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2840 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2841 RTE_PTYPE_INNER_L4_TCP,
2842 [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2843 RTE_PTYPE_TUNNEL_GRENAT |
2844 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2845 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2846 RTE_PTYPE_INNER_L4_SCTP,
2847 [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2848 RTE_PTYPE_TUNNEL_GRENAT |
2849 RTE_PTYPE_INNER_L2_ETHER_VLAN |
2850 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2851 RTE_PTYPE_INNER_L4_ICMP,
2852 /* [154] - [255] reserved */
2853 [256] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2854 RTE_PTYPE_TUNNEL_GTPC,
2855 [257] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2856 RTE_PTYPE_TUNNEL_GTPC,
2857 [258] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2858 RTE_PTYPE_TUNNEL_GTPU,
2859 [259] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2860 RTE_PTYPE_TUNNEL_GTPU,
2861 /* [260] - [263] reserved */
2862 [264] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2863 RTE_PTYPE_TUNNEL_GTPC,
2864 [265] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2865 RTE_PTYPE_TUNNEL_GTPC,
2866 [266] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2867 RTE_PTYPE_TUNNEL_GTPU,
2868 [267] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2869 RTE_PTYPE_TUNNEL_GTPU,
2871 /* All others reserved */
2874 return type_table[ptype];
2877 void __attribute__((cold))
2878 ice_set_default_ptype_table(struct rte_eth_dev *dev)
2880 struct ice_adapter *ad =
2881 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2884 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
2885 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
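/* The Rx paths index ad->ptype_tbl with the packet type field extracted
 * from the Rx descriptor to fill mbuf->packet_type. A minimal sketch of
 * that lookup (macro names shown for illustration):
 *
 *   ptype = (qword1 & ICE_RXD_QW1_PTYPE_M) >> ICE_RXD_QW1_PTYPE_S;
 *   mb->packet_type = ptype_tbl[ptype];
 */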