1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
8 #include "rte_pmd_ice.h"
11 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
15 PKT_TX_OUTER_IP_CKSUM)
17 /* Offset of mbuf dynamic field for protocol extraction data */
18 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
20 /* Mask of mbuf dynamic flags for protocol extraction type */
21 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
22 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
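/* The dynamic field offset and the dynamic flag masks above are only
 * meaningful once the corresponding rte_mbuf dynamic field/flags have been
 * registered (expected to happen elsewhere in the driver when protocol
 * extraction is requested). Until then the offset stays at -1 and the masks
 * stay 0, which the Rx path treats as "metadata not available".
 */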
28 static inline uint64_t
29 ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
35 [ICE_RXDID_COMMS_AUX_VLAN] = {
36 &rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
37 [ICE_RXDID_COMMS_AUX_IPV4] = {
38 &rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
39 [ICE_RXDID_COMMS_AUX_IPV6] = {
40 &rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
41 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
42 &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
43 [ICE_RXDID_COMMS_AUX_TCP] = {
44 &rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
45 [ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
46 &rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
50 if (rxdid < RTE_DIM(ol_flag_map)) {
51 ol_flag = ol_flag_map[rxdid].ol_flag;
55 *chk_valid = ol_flag_map[rxdid].chk_valid;
63 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
65 static uint8_t rxdid_map[] = {
66 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_GENERIC,
67 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
68 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
69 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
70 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
71 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
72 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
75 return xtr_type < RTE_DIM(rxdid_map) ?
76 rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
79 static enum ice_status
80 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
82 struct ice_vsi *vsi = rxq->vsi;
83 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
84 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
85 struct ice_rlan_ctx rx_ctx;
87 uint16_t buf_size, len;
88 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
89 uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
92 /* Set buffer size as header split is disabled. */
93 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
94 RTE_PKTMBUF_HEADROOM);
96 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
97 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
98 rxq->max_pkt_len = RTE_MIN(len,
99 dev->data->dev_conf.rxmode.max_rx_pkt_len);
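/* max_pkt_len is therefore the largest frame this queue can accept: the
 * capacity of a chain of ICE_SUPPORT_CHAIN_NUM buffers of rx_buf_len bytes
 * each, capped by the configured max_rx_pkt_len.
 */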
101 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
102 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
103 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
104 PMD_DRV_LOG(ERR, "maximum packet length must "
105 "be larger than %u and smaller than %u,"
106 "as jumbo frame is enabled",
107 (uint32_t)RTE_ETHER_MAX_LEN,
108 (uint32_t)ICE_FRAME_SIZE_MAX);
112 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
113 rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
114 PMD_DRV_LOG(ERR, "maximum packet length must be "
115 "larger than %u and smaller than %u, "
116 "as jumbo frame is disabled",
117 (uint32_t)RTE_ETHER_MIN_LEN,
118 (uint32_t)RTE_ETHER_MAX_LEN);
123 memset(&rx_ctx, 0, sizeof(rx_ctx));
125 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
126 rx_ctx.qlen = rxq->nb_rx_desc;
127 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
128 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
129 rx_ctx.dtype = 0; /* No Header Split mode */
130 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
131 rx_ctx.dsize = 1; /* 32B descriptors */
133 rx_ctx.rxmax = rxq->max_pkt_len;
134 /* TPH: Transaction Layer Packet (TLP) processing hints */
135 rx_ctx.tphrdesc_ena = 1;
136 rx_ctx.tphwdesc_ena = 1;
137 rx_ctx.tphdata_ena = 1;
138 rx_ctx.tphhead_ena = 1;
139 /* Low Receive Queue Threshold defined in units of 64 descriptors.
140 * When the number of free descriptors goes below the lrxqthresh,
141 * an immediate interrupt is triggered.
143 rx_ctx.lrxqthresh = 2;
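/* Illustrative reading of the field, based on the comment above: with
 * lrxqthresh = 2 and the 64-descriptor granularity, the interrupt is
 * expected to fire once fewer than 2 * 64 = 128 free descriptors remain.
 */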
144 /* Default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
147 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
149 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
151 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
152 rxq->port_id, rxq->queue_id, rxdid);
154 /* Enable Flexible Descriptors in the queue context which
155 * allows this driver to select a specific receive descriptor format
157 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
158 QRXFLXP_CNTXT_RXDID_IDX_M;
160 /* Increase the context priority to pick up the profile ID;
161 * default is 0x01; setting it to 0x03 ensures the profile
162 * is programmed even if the previous context has the same priority
164 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
165 QRXFLXP_CNTXT_RXDID_PRIO_M;
167 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
169 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
171 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
175 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
177 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
182 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
183 RTE_PKTMBUF_HEADROOM);
185 /* Check if scattered RX needs to be used. */
186 if (rxq->max_pkt_len > buf_size)
187 dev->data->scattered_rx = 1;
189 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
191 /* Init the Rx tail register*/
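/* Writing nb_rx_desc - 1 (the last descriptor index) rather than nb_rx_desc
 * keeps one descriptor in reserve, so the tail (RDT) never equals the head
 * (RDH); see the matching note on the Rx burst path about avoiding a "full"
 * ring from the hardware point of view.
 */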
192 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
197 /* Allocate mbufs for all descriptors in rx queue */
199 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
201 struct ice_rx_entry *rxe = rxq->sw_ring;
205 for (i = 0; i < rxq->nb_rx_desc; i++) {
206 volatile union ice_rx_flex_desc *rxd;
207 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
209 if (unlikely(!mbuf)) {
210 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
214 rte_mbuf_refcnt_set(mbuf, 1);
216 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
218 mbuf->port = rxq->port_id;
221 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
223 rxd = &rxq->rx_ring[i];
224 rxd->read.pkt_addr = dma_addr;
225 rxd->read.hdr_addr = 0;
226 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
236 /* Free all mbufs for descriptors in rx queue */
238 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
242 if (!rxq || !rxq->sw_ring) {
243 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
247 for (i = 0; i < rxq->nb_rx_desc; i++) {
248 if (rxq->sw_ring[i].mbuf) {
249 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
250 rxq->sw_ring[i].mbuf = NULL;
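/* Mbufs already pulled off the ring by the bulk-allocation Rx path are
 * parked in rx_stage and are no longer referenced by sw_ring, so they have
 * to be freed separately below.
 */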
253 if (rxq->rx_nb_avail == 0)
255 for (i = 0; i < rxq->rx_nb_avail; i++)
256 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
258 rxq->rx_nb_avail = 0;
261 /* turn on or off rx queue
262 * @q_idx: queue index in pf scope
263 * @on: turn on or off the queue
266 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
271 /* QRX_CTRL = QRX_ENA */
272 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
275 if (reg & QRX_CTRL_QENA_STAT_M)
276 return 0; /* Already on, skip */
277 reg |= QRX_CTRL_QENA_REQ_M;
279 if (!(reg & QRX_CTRL_QENA_STAT_M))
280 return 0; /* Already off, skip */
281 reg &= ~QRX_CTRL_QENA_REQ_M;
284 /* Write the register */
285 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
286 /* Check the result. QENA_STAT is expected to follow
287 * QENA_REQ within no more than 10 us.
288 * TODO: need to change the wait counter later
290 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
291 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
292 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
294 if ((reg & QRX_CTRL_QENA_REQ_M) &&
295 (reg & QRX_CTRL_QENA_STAT_M))
298 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
299 !(reg & QRX_CTRL_QENA_STAT_M))
304 /* Check whether it timed out */
305 if (j >= ICE_CHK_Q_ENA_COUNT) {
306 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
307 (on ? "enable" : "disable"), q_idx);
315 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
319 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
320 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
321 "rxq->rx_free_thresh=%d, "
322 "ICE_RX_MAX_BURST=%d",
323 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
325 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
326 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
327 "rxq->rx_free_thresh=%d, "
328 "rxq->nb_rx_desc=%d",
329 rxq->rx_free_thresh, rxq->nb_rx_desc);
331 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
332 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
333 "rxq->nb_rx_desc=%d, "
334 "rxq->rx_free_thresh=%d",
335 rxq->nb_rx_desc, rxq->rx_free_thresh);
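/* Illustration of the checks above: rx_free_thresh = 64 with
 * nb_rx_desc = 1024 passes the second and third checks (64 < 1024 and
 * 1024 % 64 == 0), and passes the first as long as ICE_RX_MAX_BURST does
 * not exceed 64. Values are illustrative only.
 */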
342 /* reset fields in ice_rx_queue back to default */
344 ice_reset_rx_queue(struct ice_rx_queue *rxq)
350 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
354 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
356 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
357 ((volatile char *)rxq->rx_ring)[i] = 0;
359 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
360 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
361 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
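/* The extra ICE_RX_MAX_BURST sw_ring entries past the real ring end are
 * pointed at fake_mbuf so the look-ahead scan used by the bulk-allocation
 * Rx path can safely read beyond the ring without dereferencing NULL mbuf
 * pointers.
 */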
363 rxq->rx_nb_avail = 0;
364 rxq->rx_next_avail = 0;
365 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
369 rxq->pkt_first_seg = NULL;
370 rxq->pkt_last_seg = NULL;
372 rxq->rxrearm_start = 0;
377 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
379 struct ice_rx_queue *rxq;
381 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
383 PMD_INIT_FUNC_TRACE();
385 if (rx_queue_id >= dev->data->nb_rx_queues) {
386 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
387 rx_queue_id, dev->data->nb_rx_queues);
391 rxq = dev->data->rx_queues[rx_queue_id];
392 if (!rxq || !rxq->q_set) {
393 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
398 err = ice_program_hw_rx_queue(rxq);
400 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
405 err = ice_alloc_rx_queue_mbufs(rxq);
407 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
411 /* Init the RX tail register. */
412 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
414 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
416 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
419 rxq->rx_rel_mbufs(rxq);
420 ice_reset_rx_queue(rxq);
424 dev->data->rx_queue_state[rx_queue_id] =
425 RTE_ETH_QUEUE_STATE_STARTED;
431 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
433 struct ice_rx_queue *rxq;
435 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
437 if (rx_queue_id < dev->data->nb_rx_queues) {
438 rxq = dev->data->rx_queues[rx_queue_id];
440 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
442 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
446 rxq->rx_rel_mbufs(rxq);
447 ice_reset_rx_queue(rxq);
448 dev->data->rx_queue_state[rx_queue_id] =
449 RTE_ETH_QUEUE_STATE_STOPPED;
456 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
458 struct ice_tx_queue *txq;
462 struct ice_aqc_add_tx_qgrp txq_elem;
463 struct ice_tlan_ctx tx_ctx;
465 PMD_INIT_FUNC_TRACE();
467 if (tx_queue_id >= dev->data->nb_tx_queues) {
468 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
469 tx_queue_id, dev->data->nb_tx_queues);
473 txq = dev->data->tx_queues[tx_queue_id];
474 if (!txq || !txq->q_set) {
475 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
481 hw = ICE_VSI_TO_HW(vsi);
483 memset(&txq_elem, 0, sizeof(txq_elem));
484 memset(&tx_ctx, 0, sizeof(tx_ctx));
485 txq_elem.num_txqs = 1;
486 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
488 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
489 tx_ctx.qlen = txq->nb_tx_desc;
490 tx_ctx.pf_num = hw->pf_id;
491 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
492 tx_ctx.src_vsi = vsi->vsi_id;
493 tx_ctx.port_num = hw->port_info->lport;
494 tx_ctx.tso_ena = 1; /* tso enable */
495 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
496 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
498 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
501 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
503 /* Init the Tx tail register*/
504 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
506 /* FIXME: we assume TC is always 0 here */
507 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
508 &txq_elem, sizeof(txq_elem), NULL);
510 PMD_DRV_LOG(ERR, "Failed to add lan txq");
513 /* store the schedule node id */
514 txq->q_teid = txq_elem.txqs[0].q_teid;
516 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
520 static enum ice_status
521 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
523 struct ice_vsi *vsi = rxq->vsi;
524 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
525 uint32_t rxdid = ICE_RXDID_LEGACY_1;
526 struct ice_rlan_ctx rx_ctx;
531 rxq->rx_buf_len = 1024;
533 memset(&rx_ctx, 0, sizeof(rx_ctx));
535 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
536 rx_ctx.qlen = rxq->nb_rx_desc;
537 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
538 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
539 rx_ctx.dtype = 0; /* No Header Split mode */
540 rx_ctx.dsize = 1; /* 32B descriptors */
541 rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
542 /* TPH: Transaction Layer Packet (TLP) processing hints */
543 rx_ctx.tphrdesc_ena = 1;
544 rx_ctx.tphwdesc_ena = 1;
545 rx_ctx.tphdata_ena = 1;
546 rx_ctx.tphhead_ena = 1;
547 /* Low Receive Queue Threshold defined in units of 64 descriptors.
548 * When the number of free descriptors goes below the lrxqthresh,
549 * an immediate interrupt is triggered.
551 rx_ctx.lrxqthresh = 2;
552 /* Default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
555 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
557 /* Enable Flexible Descriptors in the queue context which
558 * allows this driver to select a specific receive descriptor format
560 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
561 QRXFLXP_CNTXT_RXDID_IDX_M;
563 /* Increase the context priority to pick up the profile ID;
564 * default is 0x01; setting it to 0x03 ensures the profile
565 * is programmed even if the previous context has the same priority
567 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
568 QRXFLXP_CNTXT_RXDID_PRIO_M;
570 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
572 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
574 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
578 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
580 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
585 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
587 /* Init the Rx tail register*/
588 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
594 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
596 struct ice_rx_queue *rxq;
598 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
599 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
601 PMD_INIT_FUNC_TRACE();
604 if (!rxq || !rxq->q_set) {
605 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
610 err = ice_fdir_program_hw_rx_queue(rxq);
612 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
617 /* Init the RX tail register. */
618 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
620 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
622 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
625 ice_reset_rx_queue(rxq);
633 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
635 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
636 struct ice_tx_queue *txq;
640 struct ice_aqc_add_tx_qgrp txq_elem;
641 struct ice_tlan_ctx tx_ctx;
643 PMD_INIT_FUNC_TRACE();
646 if (!txq || !txq->q_set) {
647 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
653 hw = ICE_VSI_TO_HW(vsi);
655 memset(&txq_elem, 0, sizeof(txq_elem));
656 memset(&tx_ctx, 0, sizeof(tx_ctx));
657 txq_elem.num_txqs = 1;
658 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
660 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
661 tx_ctx.qlen = txq->nb_tx_desc;
662 tx_ctx.pf_num = hw->pf_id;
663 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
664 tx_ctx.src_vsi = vsi->vsi_id;
665 tx_ctx.port_num = hw->port_info->lport;
666 tx_ctx.tso_ena = 1; /* tso enable */
667 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
668 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
670 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
673 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
675 /* Init the Tx tail register*/
676 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
678 /* FIXME: we assume TC is always 0 here */
679 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
680 &txq_elem, sizeof(txq_elem), NULL);
682 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
685 /* store the schedule node id */
686 txq->q_teid = txq_elem.txqs[0].q_teid;
691 /* Free all mbufs for descriptors in tx queue */
693 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
697 if (!txq || !txq->sw_ring) {
698 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
702 for (i = 0; i < txq->nb_tx_desc; i++) {
703 if (txq->sw_ring[i].mbuf) {
704 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
705 txq->sw_ring[i].mbuf = NULL;
711 ice_reset_tx_queue(struct ice_tx_queue *txq)
713 struct ice_tx_entry *txe;
714 uint16_t i, prev, size;
717 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
722 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
723 for (i = 0; i < size; i++)
724 ((volatile char *)txq->tx_ring)[i] = 0;
726 prev = (uint16_t)(txq->nb_tx_desc - 1);
727 for (i = 0; i < txq->nb_tx_desc; i++) {
728 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
730 txd->cmd_type_offset_bsz =
731 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
734 txe[prev].next_id = i;
738 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
739 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
744 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
745 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
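/* nb_tx_free starts at nb_tx_desc - 1: one descriptor is kept back as the
 * sentinel mentioned in ice_tx_queue_setup() to avoid the H/W race
 * condition on a completely full ring.
 */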
749 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
751 struct ice_tx_queue *txq;
752 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
753 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
754 struct ice_vsi *vsi = pf->main_vsi;
755 enum ice_status status;
758 uint16_t q_handle = tx_queue_id;
760 if (tx_queue_id >= dev->data->nb_tx_queues) {
761 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
762 tx_queue_id, dev->data->nb_tx_queues);
766 txq = dev->data->tx_queues[tx_queue_id];
768 PMD_DRV_LOG(ERR, "TX queue %u is not available",
773 q_ids[0] = txq->reg_idx;
774 q_teids[0] = txq->q_teid;
776 /* FIXME: we assume TC is always 0 here */
777 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
778 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
779 if (status != ICE_SUCCESS) {
780 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
784 txq->tx_rel_mbufs(txq);
785 ice_reset_tx_queue(txq);
786 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
792 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
794 struct ice_rx_queue *rxq;
796 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
797 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
801 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
803 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
807 rxq->rx_rel_mbufs(rxq);
813 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
815 struct ice_tx_queue *txq;
816 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
817 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
818 struct ice_vsi *vsi = pf->main_vsi;
819 enum ice_status status;
822 uint16_t q_handle = tx_queue_id;
826 PMD_DRV_LOG(ERR, "TX queue %u is not available",
832 q_ids[0] = txq->reg_idx;
833 q_teids[0] = txq->q_teid;
835 /* FIXME: we assume TC is always 0 here */
836 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
837 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
838 if (status != ICE_SUCCESS) {
839 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
843 txq->tx_rel_mbufs(txq);
849 ice_rx_queue_setup(struct rte_eth_dev *dev,
852 unsigned int socket_id,
853 const struct rte_eth_rxconf *rx_conf,
854 struct rte_mempool *mp)
856 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
857 struct ice_adapter *ad =
858 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
859 struct ice_vsi *vsi = pf->main_vsi;
860 struct ice_rx_queue *rxq;
861 const struct rte_memzone *rz;
864 int use_def_burst_func = 1;
866 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
867 nb_desc > ICE_MAX_RING_DESC ||
868 nb_desc < ICE_MIN_RING_DESC) {
869 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
874 /* Free memory if needed */
875 if (dev->data->rx_queues[queue_idx]) {
876 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
877 dev->data->rx_queues[queue_idx] = NULL;
880 /* Allocate the rx queue data structure */
881 rxq = rte_zmalloc_socket(NULL,
882 sizeof(struct ice_rx_queue),
886 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
887 "rx queue data structure");
891 rxq->nb_rx_desc = nb_desc;
892 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
893 rxq->queue_id = queue_idx;
895 rxq->reg_idx = vsi->base_queue + queue_idx;
896 rxq->port_id = dev->data->port_id;
897 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
898 rxq->crc_len = RTE_ETHER_CRC_LEN;
902 rxq->drop_en = rx_conf->rx_drop_en;
904 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
905 rxq->proto_xtr = pf->proto_xtr != NULL ?
906 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
908 /* Allocate the maximum number of RX ring hardware descriptors. */
909 len = ICE_MAX_RING_DESC;
912 * Allocating a little more memory because vectorized/bulk_alloc Rx
913 * functions don't check boundaries each time.
915 len += ICE_RX_MAX_BURST;
917 /* Allocate the maximum number of RX ring hardware descriptors. */
918 ring_size = sizeof(union ice_rx_flex_desc) * len;
919 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
920 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
921 ring_size, ICE_RING_BASE_ALIGN,
924 ice_rx_queue_release(rxq);
925 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
929 /* Zero all the descriptors in the ring. */
930 memset(rz->addr, 0, ring_size);
932 rxq->rx_ring_dma = rz->iova;
933 rxq->rx_ring = rz->addr;
935 /* always reserve more for bulk alloc */
936 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
938 /* Allocate the software ring. */
939 rxq->sw_ring = rte_zmalloc_socket(NULL,
940 sizeof(struct ice_rx_entry) * len,
944 ice_rx_queue_release(rxq);
945 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
949 ice_reset_rx_queue(rxq);
951 dev->data->rx_queues[queue_idx] = rxq;
952 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
954 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
956 if (!use_def_burst_func) {
957 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
958 "satisfied. Rx Burst Bulk Alloc function will be "
959 "used on port=%d, queue=%d.",
960 rxq->port_id, rxq->queue_id);
962 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
963 "not satisfied, Scattered Rx is requested. "
964 "on port=%d, queue=%d.",
965 rxq->port_id, rxq->queue_id);
966 ad->rx_bulk_alloc_allowed = false;
973 ice_rx_queue_release(void *rxq)
975 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
978 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
983 rte_free(q->sw_ring);
988 ice_tx_queue_setup(struct rte_eth_dev *dev,
991 unsigned int socket_id,
992 const struct rte_eth_txconf *tx_conf)
994 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
995 struct ice_vsi *vsi = pf->main_vsi;
996 struct ice_tx_queue *txq;
997 const struct rte_memzone *tz;
999 uint16_t tx_rs_thresh, tx_free_thresh;
1002 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1004 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1005 nb_desc > ICE_MAX_RING_DESC ||
1006 nb_desc < ICE_MIN_RING_DESC) {
1007 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1008 "invalid", nb_desc);
1013 * The following two parameters control the setting of the RS bit on
1014 * transmit descriptors. TX descriptors will have their RS bit set
1015 * after txq->tx_rs_thresh descriptors have been used. The TX
1016 * descriptor ring will be cleaned after txq->tx_free_thresh
1017 * descriptors are used or if the number of descriptors required to
1018 * transmit a packet is greater than the number of free TX descriptors.
1020 * The following constraints must be satisfied:
1021 * - tx_rs_thresh must be greater than 0.
1022 * - tx_rs_thresh must be less than the size of the ring minus 2.
1023 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1024 * - tx_rs_thresh must be a divisor of the ring size.
1025 * - tx_free_thresh must be greater than 0.
1026 * - tx_free_thresh must be less than the size of the ring minus 3.
1027 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1029 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1030 * race condition, hence the maximum threshold constraints. When set
1031 * to zero use default values.
1033 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1034 tx_conf->tx_free_thresh :
1035 ICE_DEFAULT_TX_FREE_THRESH);
1036 /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1038 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1039 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1040 if (tx_conf->tx_rs_thresh)
1041 tx_rs_thresh = tx_conf->tx_rs_thresh;
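/* Example values that satisfy all of the constraints listed above and
 * checked below: nb_desc = 1024 with tx_rs_thresh = 32 and
 * tx_free_thresh = 32 (32 > 0, 32 < 1022, 32 <= 32, 1024 % 32 == 0,
 * 32 < 1021 and 32 + 32 <= 1024). Purely illustrative, not necessarily the
 * driver defaults.
 */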
1042 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1043 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1044 "exceed nb_desc. (tx_rs_thresh=%u "
1045 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1046 (unsigned int)tx_rs_thresh,
1047 (unsigned int)tx_free_thresh,
1048 (unsigned int)nb_desc,
1049 (int)dev->data->port_id,
1053 if (tx_rs_thresh >= (nb_desc - 2)) {
1054 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1055 "number of TX descriptors minus 2. "
1056 "(tx_rs_thresh=%u port=%d queue=%d)",
1057 (unsigned int)tx_rs_thresh,
1058 (int)dev->data->port_id,
1062 if (tx_free_thresh >= (nb_desc - 3)) {
1063 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1064 "tx_free_thresh must be less than the "
1065 "number of TX descriptors minus 3. "
1066 "(tx_free_thresh=%u port=%d queue=%d)",
1067 (unsigned int)tx_free_thresh,
1068 (int)dev->data->port_id,
1072 if (tx_rs_thresh > tx_free_thresh) {
1073 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1074 "equal to tx_free_thresh. (tx_free_thresh=%u"
1075 " tx_rs_thresh=%u port=%d queue=%d)",
1076 (unsigned int)tx_free_thresh,
1077 (unsigned int)tx_rs_thresh,
1078 (int)dev->data->port_id,
1082 if ((nb_desc % tx_rs_thresh) != 0) {
1083 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1084 "number of TX descriptors. (tx_rs_thresh=%u"
1085 " port=%d queue=%d)",
1086 (unsigned int)tx_rs_thresh,
1087 (int)dev->data->port_id,
1091 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1092 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1093 "tx_rs_thresh is greater than 1. "
1094 "(tx_rs_thresh=%u port=%d queue=%d)",
1095 (unsigned int)tx_rs_thresh,
1096 (int)dev->data->port_id,
1101 /* Free memory if needed. */
1102 if (dev->data->tx_queues[queue_idx]) {
1103 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1104 dev->data->tx_queues[queue_idx] = NULL;
1107 /* Allocate the TX queue data structure. */
1108 txq = rte_zmalloc_socket(NULL,
1109 sizeof(struct ice_tx_queue),
1110 RTE_CACHE_LINE_SIZE,
1113 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1114 "tx queue structure");
1118 /* Allocate TX hardware ring descriptors. */
1119 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1120 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1121 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1122 ring_size, ICE_RING_BASE_ALIGN,
1125 ice_tx_queue_release(txq);
1126 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1130 txq->nb_tx_desc = nb_desc;
1131 txq->tx_rs_thresh = tx_rs_thresh;
1132 txq->tx_free_thresh = tx_free_thresh;
1133 txq->pthresh = tx_conf->tx_thresh.pthresh;
1134 txq->hthresh = tx_conf->tx_thresh.hthresh;
1135 txq->wthresh = tx_conf->tx_thresh.wthresh;
1136 txq->queue_id = queue_idx;
1138 txq->reg_idx = vsi->base_queue + queue_idx;
1139 txq->port_id = dev->data->port_id;
1140 txq->offloads = offloads;
1142 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1144 txq->tx_ring_dma = tz->iova;
1145 txq->tx_ring = tz->addr;
1147 /* Allocate software ring */
1149 rte_zmalloc_socket(NULL,
1150 sizeof(struct ice_tx_entry) * nb_desc,
1151 RTE_CACHE_LINE_SIZE,
1153 if (!txq->sw_ring) {
1154 ice_tx_queue_release(txq);
1155 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1159 ice_reset_tx_queue(txq);
1161 dev->data->tx_queues[queue_idx] = txq;
1162 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1163 ice_set_tx_function_flag(dev, txq);
1169 ice_tx_queue_release(void *txq)
1171 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1174 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1179 rte_free(q->sw_ring);
1184 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1185 struct rte_eth_rxq_info *qinfo)
1187 struct ice_rx_queue *rxq;
1189 rxq = dev->data->rx_queues[queue_id];
1191 qinfo->mp = rxq->mp;
1192 qinfo->scattered_rx = dev->data->scattered_rx;
1193 qinfo->nb_desc = rxq->nb_rx_desc;
1195 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1196 qinfo->conf.rx_drop_en = rxq->drop_en;
1197 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1201 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1202 struct rte_eth_txq_info *qinfo)
1204 struct ice_tx_queue *txq;
1206 txq = dev->data->tx_queues[queue_id];
1208 qinfo->nb_desc = txq->nb_tx_desc;
1210 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1211 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1212 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1214 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1215 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1216 qinfo->conf.offloads = txq->offloads;
1217 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1221 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1223 #define ICE_RXQ_SCAN_INTERVAL 4
1224 volatile union ice_rx_flex_desc *rxdp;
1225 struct ice_rx_queue *rxq;
1228 rxq = dev->data->rx_queues[rx_queue_id];
1229 rxdp = &rxq->rx_ring[rxq->rx_tail];
1230 while ((desc < rxq->nb_rx_desc) &&
1231 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1232 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1234 * Check the DD bit of one Rx descriptor in each group of 4,
1235 * to avoid checking too frequently, which would degrade performance
1238 desc += ICE_RXQ_SCAN_INTERVAL;
1239 rxdp += ICE_RXQ_SCAN_INTERVAL;
1240 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1241 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1242 desc - rxq->nb_rx_desc]);
1248 #define ICE_RX_FLEX_ERR0_BITS \
1249 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1250 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1251 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1252 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1253 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1254 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1256 /* Rx L3/L4 checksum */
1257 static inline uint64_t
1258 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1262 /* check if HW has decoded the packet and checksum */
1263 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1266 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1267 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1271 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1272 flags |= PKT_RX_IP_CKSUM_BAD;
1274 flags |= PKT_RX_IP_CKSUM_GOOD;
1276 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1277 flags |= PKT_RX_L4_CKSUM_BAD;
1279 flags |= PKT_RX_L4_CKSUM_GOOD;
1281 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1282 flags |= PKT_RX_EIP_CKSUM_BAD;
1288 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1290 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1291 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1292 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1294 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1295 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1296 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1301 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1302 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1303 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1304 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1305 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1306 mb->vlan_tci_outer = mb->vlan_tci;
1307 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1308 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1309 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1310 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1312 mb->vlan_tci_outer = 0;
1315 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1316 mb->vlan_tci, mb->vlan_tci_outer);
1319 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1320 #define ICE_RX_PROTO_XTR_VALID \
1321 ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
1322 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
1325 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
1326 volatile struct ice_32b_rx_flex_desc_comms *desc)
1328 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
1329 uint32_t metadata = 0;
1333 ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
1334 if (unlikely(!ol_flag))
1338 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
1339 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
1341 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
1343 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
1345 if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
1346 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
1347 else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
1348 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
1354 mb->ol_flags |= ol_flag;
1356 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
1361 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
1362 volatile union ice_rx_flex_desc *rxdp)
1364 volatile struct ice_32b_rx_flex_desc_comms *desc =
1365 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
1368 stat_err = rte_le_to_cpu_16(desc->status_error0);
1369 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
1370 mb->ol_flags |= PKT_RX_RSS_HASH;
1371 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
1374 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1375 if (desc->flow_id != 0xFFFFFFFF) {
1376 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1377 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
1380 if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
1381 ice_rxd_to_proto_xtr(mb, desc);
1385 #define ICE_LOOK_AHEAD 8
1386 #if (ICE_LOOK_AHEAD != 8)
1387 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1390 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1392 volatile union ice_rx_flex_desc *rxdp;
1393 struct ice_rx_entry *rxep;
1394 struct rte_mbuf *mb;
1397 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1398 int32_t i, j, nb_rx = 0;
1399 uint64_t pkt_flags = 0;
1400 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1402 rxdp = &rxq->rx_ring[rxq->rx_tail];
1403 rxep = &rxq->sw_ring[rxq->rx_tail];
1405 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1407 /* Make sure there is at least 1 packet to receive */
1408 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1412 * Scan LOOK_AHEAD descriptors at a time to determine which
1413 * descriptors reference packets that are ready to be received.
1415 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1416 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1417 /* Read desc statuses backwards to avoid race condition */
1418 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1419 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1423 /* Compute how many status bits were set */
1424 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1425 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1429 /* Translate descriptor info to mbuf parameters */
1430 for (j = 0; j < nb_dd; j++) {
1432 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1433 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1434 mb->data_len = pkt_len;
1435 mb->pkt_len = pkt_len;
1437 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1438 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1439 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1440 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1441 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1442 ice_rxd_to_pkt_fields(mb, &rxdp[j]);
1444 mb->ol_flags |= pkt_flags;
1447 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1448 rxq->rx_stage[i + j] = rxep[j].mbuf;
1450 if (nb_dd != ICE_LOOK_AHEAD)
1454 /* Clear software ring entries */
1455 for (i = 0; i < nb_rx; i++)
1456 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1458 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1459 "port_id=%u, queue_id=%u, nb_rx=%d",
1460 rxq->port_id, rxq->queue_id, nb_rx);
1465 static inline uint16_t
1466 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1467 struct rte_mbuf **rx_pkts,
1471 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1473 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1475 for (i = 0; i < nb_pkts; i++)
1476 rx_pkts[i] = stage[i];
1478 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1479 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1485 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1487 volatile union ice_rx_flex_desc *rxdp;
1488 struct ice_rx_entry *rxep;
1489 struct rte_mbuf *mb;
1490 uint16_t alloc_idx, i;
1494 /* Allocate buffers in bulk */
1495 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1496 (rxq->rx_free_thresh - 1));
1497 rxep = &rxq->sw_ring[alloc_idx];
1498 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1499 rxq->rx_free_thresh);
1500 if (unlikely(diag != 0)) {
1501 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1505 rxdp = &rxq->rx_ring[alloc_idx];
1506 for (i = 0; i < rxq->rx_free_thresh; i++) {
1507 if (likely(i < (rxq->rx_free_thresh - 1)))
1508 /* Prefetch next mbuf */
1509 rte_prefetch0(rxep[i + 1].mbuf);
1512 rte_mbuf_refcnt_set(mb, 1);
1514 mb->data_off = RTE_PKTMBUF_HEADROOM;
1516 mb->port = rxq->port_id;
1517 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1518 rxdp[i].read.hdr_addr = 0;
1519 rxdp[i].read.pkt_addr = dma_addr;
1522 /* Update Rx tail register */
1523 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
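/* rx_free_trigger is the ring index at which the next refill of
 * rx_free_thresh buffers will be triggered; advance it by one threshold's
 * worth of descriptors and wrap it back to rx_free_thresh - 1 when the end
 * of the ring is reached.
 */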
1525 rxq->rx_free_trigger =
1526 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1527 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1528 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1533 static inline uint16_t
1534 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1536 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1538 struct rte_eth_dev *dev;
1543 if (rxq->rx_nb_avail)
1544 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1546 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1547 rxq->rx_next_avail = 0;
1548 rxq->rx_nb_avail = nb_rx;
1549 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1551 if (rxq->rx_tail > rxq->rx_free_trigger) {
1552 if (ice_rx_alloc_bufs(rxq) != 0) {
1555 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1556 dev->data->rx_mbuf_alloc_failed +=
1557 rxq->rx_free_thresh;
1558 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1559 "port_id=%u, queue_id=%u",
1560 rxq->port_id, rxq->queue_id);
1561 rxq->rx_nb_avail = 0;
1562 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1563 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1564 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1570 if (rxq->rx_tail >= rxq->nb_rx_desc)
1573 if (rxq->rx_nb_avail)
1574 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1580 ice_recv_pkts_bulk_alloc(void *rx_queue,
1581 struct rte_mbuf **rx_pkts,
1588 if (unlikely(nb_pkts == 0))
1591 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1592 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
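/* Requests larger than ICE_RX_MAX_BURST are split into
 * ICE_RX_MAX_BURST-sized chunks and handed to rx_recv_pkts() one chunk at
 * a time.
 */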
1595 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1596 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1597 nb_rx = (uint16_t)(nb_rx + count);
1598 nb_pkts = (uint16_t)(nb_pkts - count);
1607 ice_recv_scattered_pkts(void *rx_queue,
1608 struct rte_mbuf **rx_pkts,
1611 struct ice_rx_queue *rxq = rx_queue;
1612 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1613 volatile union ice_rx_flex_desc *rxdp;
1614 union ice_rx_flex_desc rxd;
1615 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1616 struct ice_rx_entry *rxe;
1617 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1618 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1619 struct rte_mbuf *nmb; /* new allocated mbuf */
1620 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1621 uint16_t rx_id = rxq->rx_tail;
1623 uint16_t nb_hold = 0;
1624 uint16_t rx_packet_len;
1625 uint16_t rx_stat_err0;
1628 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1629 struct rte_eth_dev *dev;
1631 while (nb_rx < nb_pkts) {
1632 rxdp = &rx_ring[rx_id];
1633 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1635 /* Check the DD bit first */
1636 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1640 nmb = rte_mbuf_raw_alloc(rxq->mp);
1641 if (unlikely(!nmb)) {
1642 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1643 dev->data->rx_mbuf_alloc_failed++;
1646 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1649 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1651 if (unlikely(rx_id == rxq->nb_rx_desc))
1654 /* Prefetch next mbuf */
1655 rte_prefetch0(sw_ring[rx_id].mbuf);
1658 * When next RX descriptor is on a cache line boundary,
1659 * prefetch the next 4 RX descriptors and next 8 pointers
1662 if ((rx_id & 0x3) == 0) {
1663 rte_prefetch0(&rx_ring[rx_id]);
1664 rte_prefetch0(&sw_ring[rx_id]);
1670 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1672 /* Set data buffer address and data length of the mbuf */
1673 rxdp->read.hdr_addr = 0;
1674 rxdp->read.pkt_addr = dma_addr;
1675 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1676 ICE_RX_FLX_DESC_PKT_LEN_M;
1677 rxm->data_len = rx_packet_len;
1678 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1681 * If this is the first buffer of the received packet, set the
1682 * pointer to the first mbuf of the packet and initialize its
1683 * context. Otherwise, update the total length and the number
1684 * of segments of the current scattered packet, and update the
1685 * pointer to the last mbuf of the current packet.
1689 first_seg->nb_segs = 1;
1690 first_seg->pkt_len = rx_packet_len;
1692 first_seg->pkt_len =
1693 (uint16_t)(first_seg->pkt_len +
1695 first_seg->nb_segs++;
1696 last_seg->next = rxm;
1700 * If this is not the last buffer of the received packet,
1701 * update the pointer to the last mbuf of the current scattered
1702 * packet and continue to parse the RX ring.
1704 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1710 * This is the last buffer of the received packet. If the CRC
1711 * is not stripped by the hardware:
1712 * - Subtract the CRC length from the total packet length.
1713 * - If the last buffer only contains the whole CRC or a part
1714 * of it, free the mbuf associated to the last buffer. If part
1715 * of the CRC is also contained in the previous mbuf, subtract
1716 * the length of that CRC part from the data length of the
1720 if (unlikely(rxq->crc_len > 0)) {
1721 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1722 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1723 rte_pktmbuf_free_seg(rxm);
1724 first_seg->nb_segs--;
1725 last_seg->data_len =
1726 (uint16_t)(last_seg->data_len -
1727 (RTE_ETHER_CRC_LEN - rx_packet_len));
1728 last_seg->next = NULL;
1730 rxm->data_len = (uint16_t)(rx_packet_len -
1734 first_seg->port = rxq->port_id;
1735 first_seg->ol_flags = 0;
1736 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1737 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1738 ice_rxd_to_vlan_tci(first_seg, &rxd);
1739 ice_rxd_to_pkt_fields(first_seg, &rxd);
1740 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1741 first_seg->ol_flags |= pkt_flags;
1742 /* Prefetch data of first segment, if configured to do so. */
1743 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1744 first_seg->data_off));
1745 rx_pkts[nb_rx++] = first_seg;
1749 /* Record index of the next RX descriptor to probe. */
1750 rxq->rx_tail = rx_id;
1751 rxq->pkt_first_seg = first_seg;
1752 rxq->pkt_last_seg = last_seg;
1755 * If the number of free RX descriptors is greater than the RX free
1756 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1757 * register. Update the RDT with the value of the last processed RX
1758 * descriptor minus 1, to guarantee that the RDT register is never
1759 * equal to the RDH register, which creates a "full" ring situation
1760 * from the hardware point of view.
1762 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1763 if (nb_hold > rxq->rx_free_thresh) {
1764 rx_id = (uint16_t)(rx_id == 0 ?
1765 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1766 /* write TAIL register */
1767 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1770 rxq->nb_rx_hold = nb_hold;
1772 /* return received packet in the burst */
1777 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1779 struct ice_adapter *ad =
1780 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1781 const uint32_t *ptypes;
1783 static const uint32_t ptypes_os[] = {
1784 /* refers to ice_get_default_pkt_type() */
1786 RTE_PTYPE_L2_ETHER_TIMESYNC,
1787 RTE_PTYPE_L2_ETHER_LLDP,
1788 RTE_PTYPE_L2_ETHER_ARP,
1789 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1790 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1793 RTE_PTYPE_L4_NONFRAG,
1797 RTE_PTYPE_TUNNEL_GRENAT,
1798 RTE_PTYPE_TUNNEL_IP,
1799 RTE_PTYPE_INNER_L2_ETHER,
1800 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1801 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1802 RTE_PTYPE_INNER_L4_FRAG,
1803 RTE_PTYPE_INNER_L4_ICMP,
1804 RTE_PTYPE_INNER_L4_NONFRAG,
1805 RTE_PTYPE_INNER_L4_SCTP,
1806 RTE_PTYPE_INNER_L4_TCP,
1807 RTE_PTYPE_INNER_L4_UDP,
1811 static const uint32_t ptypes_comms[] = {
1812 /* refers to ice_get_default_pkt_type() */
1814 RTE_PTYPE_L2_ETHER_TIMESYNC,
1815 RTE_PTYPE_L2_ETHER_LLDP,
1816 RTE_PTYPE_L2_ETHER_ARP,
1817 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1818 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1821 RTE_PTYPE_L4_NONFRAG,
1825 RTE_PTYPE_TUNNEL_GRENAT,
1826 RTE_PTYPE_TUNNEL_IP,
1827 RTE_PTYPE_INNER_L2_ETHER,
1828 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1829 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1830 RTE_PTYPE_INNER_L4_FRAG,
1831 RTE_PTYPE_INNER_L4_ICMP,
1832 RTE_PTYPE_INNER_L4_NONFRAG,
1833 RTE_PTYPE_INNER_L4_SCTP,
1834 RTE_PTYPE_INNER_L4_TCP,
1835 RTE_PTYPE_INNER_L4_UDP,
1836 RTE_PTYPE_TUNNEL_GTPC,
1837 RTE_PTYPE_TUNNEL_GTPU,
1838 RTE_PTYPE_L2_ETHER_PPPOE,
1842 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1843 ptypes = ptypes_comms;
1847 if (dev->rx_pkt_burst == ice_recv_pkts ||
1848 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1849 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1853 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1854 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1855 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1856 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1864 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1866 volatile union ice_rx_flex_desc *rxdp;
1867 struct ice_rx_queue *rxq = rx_queue;
1870 if (unlikely(offset >= rxq->nb_rx_desc))
1873 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1874 return RTE_ETH_RX_DESC_UNAVAIL;
1876 desc = rxq->rx_tail + offset;
1877 if (desc >= rxq->nb_rx_desc)
1878 desc -= rxq->nb_rx_desc;
1880 rxdp = &rxq->rx_ring[desc];
1881 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1882 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1883 return RTE_ETH_RX_DESC_DONE;
1885 return RTE_ETH_RX_DESC_AVAIL;
1889 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1891 struct ice_tx_queue *txq = tx_queue;
1892 volatile uint64_t *status;
1893 uint64_t mask, expect;
1896 if (unlikely(offset >= txq->nb_tx_desc))
1899 desc = txq->tx_tail + offset;
1900 /* go to next desc that has the RS bit */
1901 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1903 if (desc >= txq->nb_tx_desc) {
1904 desc -= txq->nb_tx_desc;
1905 if (desc >= txq->nb_tx_desc)
1906 desc -= txq->nb_tx_desc;
1909 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1910 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1911 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1912 ICE_TXD_QW1_DTYPE_S);
1913 if ((*status & mask) == expect)
1914 return RTE_ETH_TX_DESC_DONE;
1916 return RTE_ETH_TX_DESC_FULL;
1920 ice_free_queues(struct rte_eth_dev *dev)
1924 PMD_INIT_FUNC_TRACE();
1926 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1927 if (!dev->data->rx_queues[i])
1929 ice_rx_queue_release(dev->data->rx_queues[i]);
1930 dev->data->rx_queues[i] = NULL;
1931 rte_eth_dma_zone_free(dev, "rx_ring", i);
1933 dev->data->nb_rx_queues = 0;
1935 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1936 if (!dev->data->tx_queues[i])
1938 ice_tx_queue_release(dev->data->tx_queues[i]);
1939 dev->data->tx_queues[i] = NULL;
1940 rte_eth_dma_zone_free(dev, "tx_ring", i);
1942 dev->data->nb_tx_queues = 0;
1945 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
1946 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
1949 ice_fdir_setup_tx_resources(struct ice_pf *pf)
1951 struct ice_tx_queue *txq;
1952 const struct rte_memzone *tz = NULL;
1954 struct rte_eth_dev *dev;
1957 PMD_DRV_LOG(ERR, "PF is not available");
1961 dev = pf->adapter->eth_dev;
1963 /* Allocate the TX queue data structure. */
1964 txq = rte_zmalloc_socket("ice fdir tx queue",
1965 sizeof(struct ice_tx_queue),
1966 RTE_CACHE_LINE_SIZE,
1969 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
1970 "tx queue structure.");
1974 /* Allocate TX hardware ring descriptors. */
1975 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
1976 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1978 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
1979 ICE_FDIR_QUEUE_ID, ring_size,
1980 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
1982 ice_tx_queue_release(txq);
1983 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
1987 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
1988 txq->queue_id = ICE_FDIR_QUEUE_ID;
1989 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
1990 txq->vsi = pf->fdir.fdir_vsi;
1992 txq->tx_ring_dma = tz->iova;
1993 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
1995 * No need to allocate a software ring or reset for the fdir
1996 * program queue; just mark the queue as configured.
2001 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2007 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2009 struct ice_rx_queue *rxq;
2010 const struct rte_memzone *rz = NULL;
2012 struct rte_eth_dev *dev;
2015 PMD_DRV_LOG(ERR, "PF is not available");
2019 dev = pf->adapter->eth_dev;
2021 /* Allocate the RX queue data structure. */
2022 rxq = rte_zmalloc_socket("ice fdir rx queue",
2023 sizeof(struct ice_rx_queue),
2024 RTE_CACHE_LINE_SIZE,
2027 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2028 "rx queue structure.");
2032 /* Allocate RX hardware ring descriptors. */
2033 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2034 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2036 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2037 ICE_FDIR_QUEUE_ID, ring_size,
2038 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2040 ice_rx_queue_release(rxq);
2041 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2045 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2046 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2047 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2048 rxq->vsi = pf->fdir.fdir_vsi;
2050 rxq->rx_ring_dma = rz->iova;
2051 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2052 sizeof(union ice_32byte_rx_desc));
2053 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2056 * No need to allocate a software ring or reset for the fdir
2057 * rx queue; just mark the queue as configured.
2062 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2068 ice_recv_pkts(void *rx_queue,
2069 struct rte_mbuf **rx_pkts,
2072 struct ice_rx_queue *rxq = rx_queue;
2073 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2074 volatile union ice_rx_flex_desc *rxdp;
2075 union ice_rx_flex_desc rxd;
2076 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2077 struct ice_rx_entry *rxe;
2078 struct rte_mbuf *nmb; /* new allocated mbuf */
2079 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2080 uint16_t rx_id = rxq->rx_tail;
2082 uint16_t nb_hold = 0;
2083 uint16_t rx_packet_len;
2084 uint16_t rx_stat_err0;
2087 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2088 struct rte_eth_dev *dev;
2090 while (nb_rx < nb_pkts) {
2091 rxdp = &rx_ring[rx_id];
2092 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2094 /* Check the DD bit first */
2095 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2099 nmb = rte_mbuf_raw_alloc(rxq->mp);
2100 if (unlikely(!nmb)) {
2101 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2102 dev->data->rx_mbuf_alloc_failed++;
2105 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2108 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2110 if (unlikely(rx_id == rxq->nb_rx_desc))
2115 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2118 * fill the read format of the descriptor with the physical address
2119 * of the newly allocated mbuf: nmb
2121 rxdp->read.hdr_addr = 0;
2122 rxdp->read.pkt_addr = dma_addr;
2124 /* calculate rx_packet_len of the received pkt */
2125 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2126 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2128 /* fill old mbuf with received descriptor: rxd */
2129 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2130 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2133 rxm->pkt_len = rx_packet_len;
2134 rxm->data_len = rx_packet_len;
2135 rxm->port = rxq->port_id;
2136 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2137 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2138 ice_rxd_to_vlan_tci(rxm, &rxd);
2139 ice_rxd_to_pkt_fields(rxm, &rxd);
2140 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2141 rxm->ol_flags |= pkt_flags;
2142 /* copy old mbuf to rx_pkts */
2143 rx_pkts[nb_rx++] = rxm;
2145 rxq->rx_tail = rx_id;
2147 * If the number of free RX descriptors is greater than the RX free
2148 * threshold of the queue, advance the queue's receive tail register.
2149 * Update that register with the value of the last processed RX
2150 * descriptor minus 1.
2152 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2153 if (nb_hold > rxq->rx_free_thresh) {
2154 rx_id = (uint16_t)(rx_id == 0 ?
2155 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2156 /* write TAIL register */
2157 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
2160 rxq->nb_rx_hold = nb_hold;
2162 /* return received packet in the burst */
2167 ice_parse_tunneling_params(uint64_t ol_flags,
2168 union ice_tx_offload tx_offload,
2169 uint32_t *cd_tunneling)
2171 /* EIPT: External (outer) IP header type */
2172 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2173 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2174 else if (ol_flags & PKT_TX_OUTER_IPV4)
2175 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2176 else if (ol_flags & PKT_TX_OUTER_IPV6)
2177 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2179 /* EIPLEN: External (outer) IP header length, in DWords */
2180 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2181 ICE_TXD_CTX_QW0_EIPLEN_S;
2183 /* L4TUNT: L4 Tunneling Type */
2184 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2185 case PKT_TX_TUNNEL_IPIP:
2186 /* for non UDP / GRE tunneling, set to 00b */
2188 case PKT_TX_TUNNEL_VXLAN:
2189 case PKT_TX_TUNNEL_GTP:
2190 case PKT_TX_TUNNEL_GENEVE:
2191 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2193 case PKT_TX_TUNNEL_GRE:
2194 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2197 PMD_TX_LOG(ERR, "Tunnel type not supported");
2201 /* L4TUNLEN: L4 Tunneling Length, in Words
2203 * We depend on the app to set rte_mbuf.l2_len correctly.
2204 * For IP in GRE it should be set to the length of the GRE
2206 * For MAC in GRE or MAC in UDP it should be set to the length
2207 * of the GRE or UDP headers plus the inner MAC up to and including
2208 * its last Ethertype.
2209 * If MPLS labels exists, it should include them as well.
2211 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2212 ICE_TXD_CTX_QW0_NATLEN_S;
2214 if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2215 (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2216 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2217 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2221 ice_txd_enable_checksum(uint64_t ol_flags,
2223 uint32_t *td_offset,
2224 union ice_tx_offload tx_offload)
2227 if (ol_flags & PKT_TX_TUNNEL_MASK)
2228 *td_offset |= (tx_offload.outer_l2_len >> 1)
2229 << ICE_TX_DESC_LEN_MACLEN_S;
2231 *td_offset |= (tx_offload.l2_len >> 1)
2232 << ICE_TX_DESC_LEN_MACLEN_S;
2234 /* Enable L3 checksum offloads */
2235 if (ol_flags & PKT_TX_IP_CKSUM) {
2236 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2237 *td_offset |= (tx_offload.l3_len >> 2) <<
2238 ICE_TX_DESC_LEN_IPLEN_S;
2239 } else if (ol_flags & PKT_TX_IPV4) {
2240 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2241 *td_offset |= (tx_offload.l3_len >> 2) <<
2242 ICE_TX_DESC_LEN_IPLEN_S;
2243 } else if (ol_flags & PKT_TX_IPV6) {
2244 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2245 *td_offset |= (tx_offload.l3_len >> 2) <<
2246 ICE_TX_DESC_LEN_IPLEN_S;
2249 if (ol_flags & PKT_TX_TCP_SEG) {
2250 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2251 *td_offset |= (tx_offload.l4_len >> 2) <<
2252 ICE_TX_DESC_LEN_L4_LEN_S;
2256 /* Enable L4 checksum offloads */
2257 switch (ol_flags & PKT_TX_L4_MASK) {
2258 case PKT_TX_TCP_CKSUM:
2259 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2260 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2261 ICE_TX_DESC_LEN_L4_LEN_S;
2263 case PKT_TX_SCTP_CKSUM:
2264 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2265 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2266 ICE_TX_DESC_LEN_L4_LEN_S;
2268 case PKT_TX_UDP_CKSUM:
2269 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2270 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2271 ICE_TX_DESC_LEN_L4_LEN_S;
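/* Reclaim transmitted descriptors: if the descriptor located tx_rs_thresh
 * entries after the last cleaned one reports DD, return that whole range to
 * the free pool; callers treat a non-zero return as "nothing cleaned".
 */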
2279 ice_xmit_cleanup(struct ice_tx_queue *txq)
2281 struct ice_tx_entry *sw_ring = txq->sw_ring;
2282 volatile struct ice_tx_desc *txd = txq->tx_ring;
2283 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2284 uint16_t nb_tx_desc = txq->nb_tx_desc;
2285 uint16_t desc_to_clean_to;
2286 uint16_t nb_tx_to_clean;
2288 /* Determine the last descriptor needing to be cleaned */
2289 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2290 if (desc_to_clean_to >= nb_tx_desc)
2291 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2293 /* Check to make sure the last descriptor to clean is done */
2294 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2295 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2296 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2297 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2298 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2300 txq->port_id, txq->queue_id,
2301 txd[desc_to_clean_to].cmd_type_offset_bsz);
2302 /* Failed to clean any descriptors */
2306 /* Figure out how many descriptors will be cleaned */
2307 if (last_desc_cleaned > desc_to_clean_to)
2308 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2311 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2314 /* The last descriptor to clean is done, so that means all the
2315 * descriptors from the last descriptor that was cleaned
2316 * up to the last descriptor with the RS bit set
2317 * are done. Only reset the threshold descriptor.
2319 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2321 /* Update the txq to reflect the last descriptor that was cleaned */
2322 txq->last_desc_cleaned = desc_to_clean_to;
2323 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2328 /* Construct the tx flags */
2329 static inline uint64_t
2330 ice_build_ctob(uint32_t td_cmd,
2335 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2336 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2337 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2338 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2339 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2342 /* Check if the context descriptor is needed for TX offloading */
2343 static inline uint16_t
2344 ice_calc_context_desc(uint64_t flags)
2346 static uint64_t mask = PKT_TX_TCP_SEG |
2348 PKT_TX_OUTER_IP_CKSUM |
2351 return (flags & mask) ? 1 : 0;
2354 /* set ice TSO context descriptor */
2355 static inline uint64_t
2356 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2358 uint64_t ctx_desc = 0;
2359 uint32_t cd_cmd, hdr_len, cd_tso_len;
2361 if (!tx_offload.l4_len) {
2362 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2366 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2367 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2368 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2370 cd_cmd = ICE_TX_CTX_DESC_TSO;
2371 cd_tso_len = mbuf->pkt_len - hdr_len;
2372 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2373 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2374 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2379 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2380 #define ICE_MAX_DATA_PER_TXD \
2381 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
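/* For example, with ICE_MAX_DATA_PER_TXD = 16383 bytes, a single 45000-byte
 * TSO mbuf segment needs DIV_ROUND_UP(45000, 16383) = 3 data descriptors.
 */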
2382 /* Calculate the number of TX descriptors needed for each pkt */
2383 static inline uint16_t
2384 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2386 struct rte_mbuf *txd = tx_pkt;
2389 while (txd != NULL) {
2390 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
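/* Full-featured Tx burst function: handles context descriptors (TSO,
 * tunneling, QinQ), per-descriptor checksum offload and VLAN insertion,
 * splits segments larger than ICE_MAX_DATA_PER_TXD, and sets the RS bit
 * based on tx_rs_thresh before bumping the queue tail register.
 */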
2398 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2400 struct ice_tx_queue *txq;
2401 volatile struct ice_tx_desc *tx_ring;
2402 volatile struct ice_tx_desc *txd;
2403 struct ice_tx_entry *sw_ring;
2404 struct ice_tx_entry *txe, *txn;
2405 struct rte_mbuf *tx_pkt;
2406 struct rte_mbuf *m_seg;
2407 uint32_t cd_tunneling_params;
2412 uint32_t td_cmd = 0;
2413 uint32_t td_offset = 0;
2414 uint32_t td_tag = 0;
2417 uint64_t buf_dma_addr;
2419 union ice_tx_offload tx_offload = {0};
2422 sw_ring = txq->sw_ring;
2423 tx_ring = txq->tx_ring;
2424 tx_id = txq->tx_tail;
2425 txe = &sw_ring[tx_id];
2427 /* Check if the descriptor ring needs to be cleaned. */
2428 if (txq->nb_tx_free < txq->tx_free_thresh)
2429 (void)ice_xmit_cleanup(txq);
2431 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2432 tx_pkt = *tx_pkts++;
2437 ol_flags = tx_pkt->ol_flags;
2438 tx_offload.l2_len = tx_pkt->l2_len;
2439 tx_offload.l3_len = tx_pkt->l3_len;
2440 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2441 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2442 tx_offload.l4_len = tx_pkt->l4_len;
2443 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2444 /* Calculate the number of context descriptors needed. */
2445 nb_ctx = ice_calc_context_desc(ol_flags);
2447 /* The number of descriptors that must be allocated for
2448 * a packet equals the number of segments of that packet,
2449 * plus one context descriptor if needed.
2450 * Recalculate the needed Tx descriptors when TSO is enabled,
2451 * in case the mbuf data size exceeds the max data size the hardware allows per descriptor.
2454 if (ol_flags & PKT_TX_TCP_SEG)
2455 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2458 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2459 tx_last = (uint16_t)(tx_id + nb_used - 1);
2462 if (tx_last >= txq->nb_tx_desc)
2463 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2465 if (nb_used > txq->nb_tx_free) {
2466 if (ice_xmit_cleanup(txq) != 0) {
2471 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2472 while (nb_used > txq->nb_tx_free) {
2473 if (ice_xmit_cleanup(txq) != 0) {
2482 /* Descriptor based VLAN insertion */
2483 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2484 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2485 td_tag = tx_pkt->vlan_tci;
2488 /* Fill in tunneling parameters if necessary */
2489 cd_tunneling_params = 0;
2490 if (ol_flags & PKT_TX_TUNNEL_MASK)
2491 ice_parse_tunneling_params(ol_flags, tx_offload,
2492 &cd_tunneling_params);
2494 /* Enable checksum offloading */
2495 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2496 ice_txd_enable_checksum(ol_flags, &td_cmd,
2497 &td_offset, tx_offload);
2500 /* Setup TX context descriptor if required */
2501 volatile struct ice_tx_ctx_desc *ctx_txd =
2502 (volatile struct ice_tx_ctx_desc *)
2504 uint16_t cd_l2tag2 = 0;
2505 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2507 txn = &sw_ring[txe->next_id];
2508 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2510 rte_pktmbuf_free_seg(txe->mbuf);
2514 if (ol_flags & PKT_TX_TCP_SEG)
2515 cd_type_cmd_tso_mss |=
2516 ice_set_tso_ctx(tx_pkt, tx_offload);
2518 ctx_txd->tunneling_params =
2519 rte_cpu_to_le_32(cd_tunneling_params);
2521 /* TX context descriptor based double VLAN insert */
2522 if (ol_flags & PKT_TX_QINQ) {
2523 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2524 cd_type_cmd_tso_mss |=
2525 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2526 ICE_TXD_CTX_QW1_CMD_S);
2528 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2530 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2532 txe->last_id = tx_last;
2533 tx_id = txe->next_id;
2539 txd = &tx_ring[tx_id];
2540 txn = &sw_ring[txe->next_id];
2543 rte_pktmbuf_free_seg(txe->mbuf);
2546 /* Setup TX Descriptor */
2547 slen = m_seg->data_len;
2548 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2550 while ((ol_flags & PKT_TX_TCP_SEG) &&
2551 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2552 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2553 txd->cmd_type_offset_bsz =
2554 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2555 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2556 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2557 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2558 ICE_TXD_QW1_TX_BUF_SZ_S) |
2559 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2561 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2562 slen -= ICE_MAX_DATA_PER_TXD;
2564 txe->last_id = tx_last;
2565 tx_id = txe->next_id;
2567 txd = &tx_ring[tx_id];
2568 txn = &sw_ring[txe->next_id];
2571 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2572 txd->cmd_type_offset_bsz =
2573 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2574 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2575 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2576 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2577 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2579 txe->last_id = tx_last;
2580 tx_id = txe->next_id;
2582 m_seg = m_seg->next;
2585 /* fill the last descriptor with End of Packet (EOP) bit */
2586 td_cmd |= ICE_TX_DESC_CMD_EOP;
2587 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2588 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2590 /* set RS bit on the last descriptor of one packet */
2591 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2592 PMD_TX_FREE_LOG(DEBUG,
2593 "Setting RS bit on TXD id="
2594 "%4u (port=%d queue=%d)",
2595 tx_last, txq->port_id, txq->queue_id);
2597 td_cmd |= ICE_TX_DESC_CMD_RS;
2599 /* Update txq RS bit counters */
2600 txq->nb_tx_used = 0;
2602 txd->cmd_type_offset_bsz |=
2603 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2607 /* update Tail register */
2608 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2609 txq->tx_tail = tx_id;
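/* Bulk-free tx_rs_thresh mbufs once the descriptor at tx_next_dd reports
 * DD; rte_mempool_put() is used directly when MBUF_FAST_FREE is enabled.
 * On success the function returns tx_rs_thresh, the number of entries freed.
 */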
2614 static __rte_always_inline int
2615 ice_tx_free_bufs(struct ice_tx_queue *txq)
2617 struct ice_tx_entry *txep;
2620 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2621 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2622 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2625 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2627 for (i = 0; i < txq->tx_rs_thresh; i++)
2628 rte_prefetch0((txep + i)->mbuf);
2630 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2631 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2632 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2636 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2637 rte_pktmbuf_free_seg(txep->mbuf);
2642 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2643 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2644 if (txq->tx_next_dd >= txq->nb_tx_desc)
2645 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2647 return txq->tx_rs_thresh;
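/* Done-cleanup helper for the full Tx path: walk the software ring starting
 * just after tx_tail, freeing completed mbuf segments and counting whole
 * packets until free_cnt packets are released or no more descriptors are
 * done.
 */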
2651 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2654 struct ice_tx_entry *swr_ring = txq->sw_ring;
2655 uint16_t i, tx_last, tx_id;
2656 uint16_t nb_tx_free_last;
2657 uint16_t nb_tx_to_clean;
2660 /* Start freeing mbufs from the entry after tx_tail */
2661 tx_last = txq->tx_tail;
2662 tx_id = swr_ring[tx_last].next_id;
2664 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2667 nb_tx_to_clean = txq->nb_tx_free;
2668 nb_tx_free_last = txq->nb_tx_free;
2670 free_cnt = txq->nb_tx_desc;
2672 /* Loop through swr_ring to count the number of
2673 * freeable mbufs and packets.
2675 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2676 for (i = 0; i < nb_tx_to_clean &&
2677 pkt_cnt < free_cnt &&
2678 tx_id != tx_last; i++) {
2679 if (swr_ring[tx_id].mbuf != NULL) {
2680 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2681 swr_ring[tx_id].mbuf = NULL;
2684 * If this is the last segment in the packet,
2685 * increment the packet count.
2687 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2690 tx_id = swr_ring[tx_id].next_id;
2693 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2694 txq->nb_tx_free || tx_id == tx_last)
2697 if (pkt_cnt < free_cnt) {
2698 if (ice_xmit_cleanup(txq))
2701 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2702 nb_tx_free_last = txq->nb_tx_free;
2706 return (int)pkt_cnt;
2711 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2712 uint32_t free_cnt __rte_unused)
2719 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2724 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2725 free_cnt = txq->nb_tx_desc;
2727 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2729 for (i = 0; i < cnt; i += n) {
2730 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2733 n = ice_tx_free_bufs(txq);
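/* Device .tx_done_cleanup callback: dispatches to the vector, simple or
 * full cleanup helper depending on the selected Tx path. A minimal usage
 * sketch from an application (handle_error() is a hypothetical helper):
 *
 *	int ret = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 *	if (ret < 0)
 *		handle_error(ret);
 */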
2743 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2745 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2746 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2747 struct ice_adapter *ad =
2748 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2751 if (ad->tx_vec_allowed)
2752 return ice_tx_done_cleanup_vec(q, free_cnt);
2754 if (ad->tx_simple_allowed)
2755 return ice_tx_done_cleanup_simple(q, free_cnt);
2757 return ice_tx_done_cleanup_full(q, free_cnt);
2760 /* Populate 4 descriptors with data from 4 mbufs */
2762 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2767 for (i = 0; i < 4; i++, txdp++, pkts++) {
2768 dma_addr = rte_mbuf_data_iova(*pkts);
2769 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2770 txdp->cmd_type_offset_bsz =
2771 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2772 (*pkts)->data_len, 0);
2776 /* Populate 1 descriptor with data from 1 mbuf */
2778 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2782 dma_addr = rte_mbuf_data_iova(*pkts);
2783 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2784 txdp->cmd_type_offset_bsz =
2785 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2786 (*pkts)->data_len, 0);
2790 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2793 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2794 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2795 const int N_PER_LOOP = 4;
2796 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2797 int mainpart, leftover;
2801 * Process most of the packets in chunks of N pkts. Any
2802 * leftover packets will get processed one at a time.
2804 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2805 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2806 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2807 /* Copy N mbuf pointers to the S/W ring */
2808 for (j = 0; j < N_PER_LOOP; ++j)
2809 (txep + i + j)->mbuf = *(pkts + i + j);
2810 tx4(txdp + i, pkts + i);
2813 if (unlikely(leftover > 0)) {
2814 for (i = 0; i < leftover; ++i) {
2815 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2816 tx1(txdp + mainpart + i, pkts + mainpart + i);
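/* Simple Tx path: assumes single-segment mbufs and no offloads. Frees
 * completed buffers once the free count drops below tx_free_thresh, fills
 * the ring (handling wrap-around), sets the RS bit every tx_rs_thresh
 * descriptors and finally writes the queue tail register.
 */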
2821 static inline uint16_t
2822 tx_xmit_pkts(struct ice_tx_queue *txq,
2823 struct rte_mbuf **tx_pkts,
2826 volatile struct ice_tx_desc *txr = txq->tx_ring;
2830 * Begin scanning the H/W ring for done descriptors when the number
2831 * of available descriptors drops below tx_free_thresh. For each done
2832 * descriptor, free the associated buffer.
2834 if (txq->nb_tx_free < txq->tx_free_thresh)
2835 ice_tx_free_bufs(txq);
2837 /* Use only the available descriptors */
2838 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2839 if (unlikely(!nb_pkts))
2842 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2843 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2844 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2845 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2846 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2847 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2849 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2853 /* Fill hardware descriptor ring with mbuf data */
2854 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2855 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2857 /* Determine whether the RS bit needs to be set */
2858 if (txq->tx_tail > txq->tx_next_rs) {
2859 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2860 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2863 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2864 if (txq->tx_next_rs >= txq->nb_tx_desc)
2865 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2868 if (txq->tx_tail >= txq->nb_tx_desc)
2871 /* Update the tx tail register */
2872 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
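/* Burst wrapper for the simple Tx path: bursts of up to ICE_TX_MAX_BURST
 * packets are passed through directly; larger bursts are transmitted in
 * ICE_TX_MAX_BURST-sized chunks until all packets are sent or the ring
 * fills up.
 */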
2878 ice_xmit_pkts_simple(void *tx_queue,
2879 struct rte_mbuf **tx_pkts,
2884 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2885 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2889 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2892 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2893 &tx_pkts[nb_tx], num);
2894 nb_tx = (uint16_t)(nb_tx + ret);
2895 nb_pkts = (uint16_t)(nb_pkts - ret);
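/* Select the Rx burst function for the port: prefer the vector (SSE/AVX2)
 * paths when the queue setup allows it, otherwise fall back to the scalar
 * scattered, bulk-allocation or normal receive functions.
 */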
2904 ice_set_rx_function(struct rte_eth_dev *dev)
2906 PMD_INIT_FUNC_TRACE();
2907 struct ice_adapter *ad =
2908 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2910 struct ice_rx_queue *rxq;
2912 bool use_avx2 = false;
2914 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2915 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
2916 ad->rx_vec_allowed = true;
2917 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2918 rxq = dev->data->rx_queues[i];
2919 if (rxq && ice_rxq_vec_setup(rxq)) {
2920 ad->rx_vec_allowed = false;
2925 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2926 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2930 ad->rx_vec_allowed = false;
2934 if (ad->rx_vec_allowed) {
2935 if (dev->data->scattered_rx) {
2937 "Using %sVector Scattered Rx (port %d).",
2938 use_avx2 ? "avx2 " : "",
2939 dev->data->port_id);
2940 dev->rx_pkt_burst = use_avx2 ?
2941 ice_recv_scattered_pkts_vec_avx2 :
2942 ice_recv_scattered_pkts_vec;
2944 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2945 use_avx2 ? "avx2 " : "",
2946 dev->data->port_id);
2947 dev->rx_pkt_burst = use_avx2 ?
2948 ice_recv_pkts_vec_avx2 :
2956 if (dev->data->scattered_rx) {
2957 /* Set the non-LRO scattered function */
2959 "Using a Scattered function on port %d.",
2960 dev->data->port_id);
2961 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2962 } else if (ad->rx_bulk_alloc_allowed) {
2964 "Rx Burst Bulk Alloc Preconditions are "
2965 "satisfied. Rx Burst Bulk Alloc function "
2966 "will be used on port %d.",
2967 dev->data->port_id);
2968 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2971 "Rx Burst Bulk Alloc Preconditions are not "
2972 "satisfied, Normal Rx will be used on port %d.",
2973 dev->data->port_id);
2974 dev->rx_pkt_burst = ice_recv_pkts;
2978 static const struct {
2979 eth_rx_burst_t pkt_burst;
2981 } ice_rx_burst_infos[] = {
2982 { ice_recv_scattered_pkts, "Scalar Scattered" },
2983 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
2984 { ice_recv_pkts, "Scalar" },
2986 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
2987 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
2988 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
2989 { ice_recv_pkts_vec, "Vector SSE" },
2994 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2995 struct rte_eth_burst_mode *mode)
2997 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3001 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3002 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3003 snprintf(mode->info, sizeof(mode->info), "%s",
3004 ice_rx_burst_infos[i].info);
3014 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3016 struct ice_adapter *ad =
3017 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3019 /* Use a simple Tx queue if possible (only fast free is allowed) */
3020 ad->tx_simple_allowed =
3022 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3023 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3025 if (ad->tx_simple_allowed)
3026 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3030 "Simple Tx can NOT be enabled on Tx queue %u.",
3034 /*********************************************************************
3038 **********************************************************************/
3039 /* The default values of TSO MSS */
3040 #define ICE_MIN_TSO_MSS 64
3041 #define ICE_MAX_TSO_MSS 9728
3042 #define ICE_MAX_TSO_FRAME_SIZE 262144
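/* Tx prepare callback for the full-featured path: rejects TSO requests whose
 * MSS or total frame length is outside the supported range, optionally
 * validates the offload flags in debug builds, and relies on
 * rte_net_intel_cksum_prepare() to fix up checksum fields in the headers.
 */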
3044 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3051 for (i = 0; i < nb_pkts; i++) {
3053 ol_flags = m->ol_flags;
3055 if (ol_flags & PKT_TX_TCP_SEG &&
3056 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3057 m->tso_segsz > ICE_MAX_TSO_MSS ||
3058 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3060 * An MSS outside the supported range is considered malicious
3066 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3067 ret = rte_validate_tx_offload(m);
3073 ret = rte_net_intel_cksum_prepare(m);
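/* Select the Tx burst and prepare functions for the port: vector (SSE/AVX2)
 * when allowed, otherwise the simple path for offload-free queues, falling
 * back to the full-featured ice_xmit_pkts paired with ice_prep_pkts.
 */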
3083 ice_set_tx_function(struct rte_eth_dev *dev)
3085 struct ice_adapter *ad =
3086 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3088 struct ice_tx_queue *txq;
3090 bool use_avx2 = false;
3092 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3093 if (!ice_tx_vec_dev_check(dev)) {
3094 ad->tx_vec_allowed = true;
3095 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3096 txq = dev->data->tx_queues[i];
3097 if (txq && ice_txq_vec_setup(txq)) {
3098 ad->tx_vec_allowed = false;
3103 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3104 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
3108 ad->tx_vec_allowed = false;
3112 if (ad->tx_vec_allowed) {
3113 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3114 use_avx2 ? "avx2 " : "",
3115 dev->data->port_id);
3116 dev->tx_pkt_burst = use_avx2 ?
3117 ice_xmit_pkts_vec_avx2 :
3119 dev->tx_pkt_prepare = NULL;
3125 if (ad->tx_simple_allowed) {
3126 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3127 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3128 dev->tx_pkt_prepare = NULL;
3130 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3131 dev->tx_pkt_burst = ice_xmit_pkts;
3132 dev->tx_pkt_prepare = ice_prep_pkts;
3136 static const struct {
3137 eth_tx_burst_t pkt_burst;
3139 } ice_tx_burst_infos[] = {
3140 { ice_xmit_pkts_simple, "Scalar Simple" },
3141 { ice_xmit_pkts, "Scalar" },
3143 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3144 { ice_xmit_pkts_vec, "Vector SSE" },
3149 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3150 struct rte_eth_burst_mode *mode)
3152 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3156 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3157 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3158 snprintf(mode->info, sizeof(mode->info), "%s",
3159 ice_tx_burst_infos[i].info);
3168 /* The hardware datasheet describes in more detail what each value means.
3170 * @note: fix ice_dev_supported_ptypes_get() if any change here.
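 * For example, hardware ptype 23 in the table below resolves to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG.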
3172 static inline uint32_t
3173 ice_get_default_pkt_type(uint16_t ptype)
3175 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3176 __rte_cache_aligned = {
3179 [1] = RTE_PTYPE_L2_ETHER,
3180 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3181 /* [3] - [5] reserved */
3182 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3183 /* [7] - [10] reserved */
3184 [11] = RTE_PTYPE_L2_ETHER_ARP,
3185 /* [12] - [21] reserved */
3187 /* Non tunneled IPv4 */
3188 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3190 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3191 RTE_PTYPE_L4_NONFRAG,
3192 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3195 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3197 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3199 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3203 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3204 RTE_PTYPE_TUNNEL_IP |
3205 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3206 RTE_PTYPE_INNER_L4_FRAG,
3207 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3208 RTE_PTYPE_TUNNEL_IP |
3209 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3210 RTE_PTYPE_INNER_L4_NONFRAG,
3211 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3212 RTE_PTYPE_TUNNEL_IP |
3213 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3214 RTE_PTYPE_INNER_L4_UDP,
3216 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3217 RTE_PTYPE_TUNNEL_IP |
3218 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3219 RTE_PTYPE_INNER_L4_TCP,
3220 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3221 RTE_PTYPE_TUNNEL_IP |
3222 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3223 RTE_PTYPE_INNER_L4_SCTP,
3224 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3225 RTE_PTYPE_TUNNEL_IP |
3226 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3227 RTE_PTYPE_INNER_L4_ICMP,
3230 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3231 RTE_PTYPE_TUNNEL_IP |
3232 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3233 RTE_PTYPE_INNER_L4_FRAG,
3234 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3235 RTE_PTYPE_TUNNEL_IP |
3236 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3237 RTE_PTYPE_INNER_L4_NONFRAG,
3238 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3239 RTE_PTYPE_TUNNEL_IP |
3240 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3241 RTE_PTYPE_INNER_L4_UDP,
3243 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3244 RTE_PTYPE_TUNNEL_IP |
3245 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3246 RTE_PTYPE_INNER_L4_TCP,
3247 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3248 RTE_PTYPE_TUNNEL_IP |
3249 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3250 RTE_PTYPE_INNER_L4_SCTP,
3251 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3252 RTE_PTYPE_TUNNEL_IP |
3253 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3254 RTE_PTYPE_INNER_L4_ICMP,
3256 /* IPv4 --> GRE/Teredo/VXLAN */
3257 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3258 RTE_PTYPE_TUNNEL_GRENAT,
3260 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3261 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3262 RTE_PTYPE_TUNNEL_GRENAT |
3263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3264 RTE_PTYPE_INNER_L4_FRAG,
3265 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3266 RTE_PTYPE_TUNNEL_GRENAT |
3267 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3268 RTE_PTYPE_INNER_L4_NONFRAG,
3269 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3270 RTE_PTYPE_TUNNEL_GRENAT |
3271 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3272 RTE_PTYPE_INNER_L4_UDP,
3274 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3275 RTE_PTYPE_TUNNEL_GRENAT |
3276 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3277 RTE_PTYPE_INNER_L4_TCP,
3278 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3279 RTE_PTYPE_TUNNEL_GRENAT |
3280 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3281 RTE_PTYPE_INNER_L4_SCTP,
3282 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3283 RTE_PTYPE_TUNNEL_GRENAT |
3284 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3285 RTE_PTYPE_INNER_L4_ICMP,
3287 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3288 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3289 RTE_PTYPE_TUNNEL_GRENAT |
3290 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3291 RTE_PTYPE_INNER_L4_FRAG,
3292 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3293 RTE_PTYPE_TUNNEL_GRENAT |
3294 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3295 RTE_PTYPE_INNER_L4_NONFRAG,
3296 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3297 RTE_PTYPE_TUNNEL_GRENAT |
3298 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3299 RTE_PTYPE_INNER_L4_UDP,
3301 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3302 RTE_PTYPE_TUNNEL_GRENAT |
3303 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3304 RTE_PTYPE_INNER_L4_TCP,
3305 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3306 RTE_PTYPE_TUNNEL_GRENAT |
3307 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3308 RTE_PTYPE_INNER_L4_SCTP,
3309 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3310 RTE_PTYPE_TUNNEL_GRENAT |
3311 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3312 RTE_PTYPE_INNER_L4_ICMP,
3314 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3315 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3316 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3318 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3319 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3320 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3321 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3322 RTE_PTYPE_INNER_L4_FRAG,
3323 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3324 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3325 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3326 RTE_PTYPE_INNER_L4_NONFRAG,
3327 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3328 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3329 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3330 RTE_PTYPE_INNER_L4_UDP,
3332 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3333 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3334 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3335 RTE_PTYPE_INNER_L4_TCP,
3336 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3337 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3338 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3339 RTE_PTYPE_INNER_L4_SCTP,
3340 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3341 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3342 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3343 RTE_PTYPE_INNER_L4_ICMP,
3345 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3346 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3347 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3348 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3349 RTE_PTYPE_INNER_L4_FRAG,
3350 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3351 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3352 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3353 RTE_PTYPE_INNER_L4_NONFRAG,
3354 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3355 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3356 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3357 RTE_PTYPE_INNER_L4_UDP,
3359 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3360 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3361 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3362 RTE_PTYPE_INNER_L4_TCP,
3363 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3364 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3365 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3366 RTE_PTYPE_INNER_L4_SCTP,
3367 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3368 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3369 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3370 RTE_PTYPE_INNER_L4_ICMP,
3371 /* [73] - [87] reserved */
3373 /* Non tunneled IPv6 */
3374 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3376 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3377 RTE_PTYPE_L4_NONFRAG,
3378 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3381 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3383 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3385 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3389 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3390 RTE_PTYPE_TUNNEL_IP |
3391 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3392 RTE_PTYPE_INNER_L4_FRAG,
3393 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3394 RTE_PTYPE_TUNNEL_IP |
3395 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3396 RTE_PTYPE_INNER_L4_NONFRAG,
3397 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3398 RTE_PTYPE_TUNNEL_IP |
3399 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3400 RTE_PTYPE_INNER_L4_UDP,
3402 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3403 RTE_PTYPE_TUNNEL_IP |
3404 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3405 RTE_PTYPE_INNER_L4_TCP,
3406 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3407 RTE_PTYPE_TUNNEL_IP |
3408 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3409 RTE_PTYPE_INNER_L4_SCTP,
3410 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3411 RTE_PTYPE_TUNNEL_IP |
3412 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3413 RTE_PTYPE_INNER_L4_ICMP,
3416 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3417 RTE_PTYPE_TUNNEL_IP |
3418 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3419 RTE_PTYPE_INNER_L4_FRAG,
3420 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3421 RTE_PTYPE_TUNNEL_IP |
3422 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3423 RTE_PTYPE_INNER_L4_NONFRAG,
3424 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3425 RTE_PTYPE_TUNNEL_IP |
3426 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3427 RTE_PTYPE_INNER_L4_UDP,
3428 /* [105] reserved */
3429 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3430 RTE_PTYPE_TUNNEL_IP |
3431 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3432 RTE_PTYPE_INNER_L4_TCP,
3433 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3434 RTE_PTYPE_TUNNEL_IP |
3435 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3436 RTE_PTYPE_INNER_L4_SCTP,
3437 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3438 RTE_PTYPE_TUNNEL_IP |
3439 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3440 RTE_PTYPE_INNER_L4_ICMP,
3442 /* IPv6 --> GRE/Teredo/VXLAN */
3443 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3444 RTE_PTYPE_TUNNEL_GRENAT,
3446 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3447 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3448 RTE_PTYPE_TUNNEL_GRENAT |
3449 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3450 RTE_PTYPE_INNER_L4_FRAG,
3451 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3452 RTE_PTYPE_TUNNEL_GRENAT |
3453 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3454 RTE_PTYPE_INNER_L4_NONFRAG,
3455 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3456 RTE_PTYPE_TUNNEL_GRENAT |
3457 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3458 RTE_PTYPE_INNER_L4_UDP,
3459 /* [113] reserved */
3460 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3461 RTE_PTYPE_TUNNEL_GRENAT |
3462 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3463 RTE_PTYPE_INNER_L4_TCP,
3464 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3465 RTE_PTYPE_TUNNEL_GRENAT |
3466 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3467 RTE_PTYPE_INNER_L4_SCTP,
3468 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3469 RTE_PTYPE_TUNNEL_GRENAT |
3470 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3471 RTE_PTYPE_INNER_L4_ICMP,
3473 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3474 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3475 RTE_PTYPE_TUNNEL_GRENAT |
3476 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3477 RTE_PTYPE_INNER_L4_FRAG,
3478 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3479 RTE_PTYPE_TUNNEL_GRENAT |
3480 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3481 RTE_PTYPE_INNER_L4_NONFRAG,
3482 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3483 RTE_PTYPE_TUNNEL_GRENAT |
3484 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3485 RTE_PTYPE_INNER_L4_UDP,
3486 /* [120] reserved */
3487 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3488 RTE_PTYPE_TUNNEL_GRENAT |
3489 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3490 RTE_PTYPE_INNER_L4_TCP,
3491 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3492 RTE_PTYPE_TUNNEL_GRENAT |
3493 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3494 RTE_PTYPE_INNER_L4_SCTP,
3495 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3496 RTE_PTYPE_TUNNEL_GRENAT |
3497 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3498 RTE_PTYPE_INNER_L4_ICMP,
3500 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3501 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3502 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3504 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3505 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3506 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3507 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3508 RTE_PTYPE_INNER_L4_FRAG,
3509 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3510 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3511 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3512 RTE_PTYPE_INNER_L4_NONFRAG,
3513 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3514 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3515 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3516 RTE_PTYPE_INNER_L4_UDP,
3517 /* [128] reserved */
3518 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3519 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3520 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3521 RTE_PTYPE_INNER_L4_TCP,
3522 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3523 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3524 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3525 RTE_PTYPE_INNER_L4_SCTP,
3526 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3527 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3528 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3529 RTE_PTYPE_INNER_L4_ICMP,
3531 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3532 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3533 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3534 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3535 RTE_PTYPE_INNER_L4_FRAG,
3536 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3537 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3538 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3539 RTE_PTYPE_INNER_L4_NONFRAG,
3540 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3541 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3542 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3543 RTE_PTYPE_INNER_L4_UDP,
3544 /* [135] reserved */
3545 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3546 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3547 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3548 RTE_PTYPE_INNER_L4_TCP,
3549 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3550 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3551 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3552 RTE_PTYPE_INNER_L4_SCTP,
3553 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3554 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3555 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3556 RTE_PTYPE_INNER_L4_ICMP,
3557 /* [139] - [299] reserved */
3560 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3561 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3563 /* PPPoE --> IPv4 */
3564 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3565 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3567 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3568 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3569 RTE_PTYPE_L4_NONFRAG,
3570 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3571 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3573 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3574 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3576 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3577 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3579 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3580 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3583 /* PPPoE --> IPv6 */
3584 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3585 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3587 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3588 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3589 RTE_PTYPE_L4_NONFRAG,
3590 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3591 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3593 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3594 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3596 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3597 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3599 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3600 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3602 /* [314] - [324] reserved */
3604 /* IPv4/IPv6 --> GTPC/GTPU */
3605 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3606 RTE_PTYPE_TUNNEL_GTPC,
3607 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3608 RTE_PTYPE_TUNNEL_GTPC,
3609 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3610 RTE_PTYPE_TUNNEL_GTPC,
3611 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3612 RTE_PTYPE_TUNNEL_GTPC,
3613 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3614 RTE_PTYPE_TUNNEL_GTPU,
3615 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3616 RTE_PTYPE_TUNNEL_GTPU,
3618 /* IPv4 --> GTPU --> IPv4 */
3619 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3620 RTE_PTYPE_TUNNEL_GTPU |
3621 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3622 RTE_PTYPE_INNER_L4_FRAG,
3623 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3624 RTE_PTYPE_TUNNEL_GTPU |
3625 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3626 RTE_PTYPE_INNER_L4_NONFRAG,
3627 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3628 RTE_PTYPE_TUNNEL_GTPU |
3629 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3630 RTE_PTYPE_INNER_L4_UDP,
3631 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3632 RTE_PTYPE_TUNNEL_GTPU |
3633 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3634 RTE_PTYPE_INNER_L4_TCP,
3635 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3636 RTE_PTYPE_TUNNEL_GTPU |
3637 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3638 RTE_PTYPE_INNER_L4_ICMP,
3640 /* IPv6 --> GTPU --> IPv4 */
3641 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3642 RTE_PTYPE_TUNNEL_GTPU |
3643 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3644 RTE_PTYPE_INNER_L4_FRAG,
3645 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3646 RTE_PTYPE_TUNNEL_GTPU |
3647 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3648 RTE_PTYPE_INNER_L4_NONFRAG,
3649 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3650 RTE_PTYPE_TUNNEL_GTPU |
3651 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3652 RTE_PTYPE_INNER_L4_UDP,
3653 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3654 RTE_PTYPE_TUNNEL_GTPU |
3655 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3656 RTE_PTYPE_INNER_L4_TCP,
3657 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3658 RTE_PTYPE_TUNNEL_GTPU |
3659 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3660 RTE_PTYPE_INNER_L4_ICMP,
3662 /* IPv4 --> GTPU --> IPv6 */
3663 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3664 RTE_PTYPE_TUNNEL_GTPU |
3665 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3666 RTE_PTYPE_INNER_L4_FRAG,
3667 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3668 RTE_PTYPE_TUNNEL_GTPU |
3669 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3670 RTE_PTYPE_INNER_L4_NONFRAG,
3671 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3672 RTE_PTYPE_TUNNEL_GTPU |
3673 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3674 RTE_PTYPE_INNER_L4_UDP,
3675 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3676 RTE_PTYPE_TUNNEL_GTPU |
3677 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3678 RTE_PTYPE_INNER_L4_TCP,
3679 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3680 RTE_PTYPE_TUNNEL_GTPU |
3681 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3682 RTE_PTYPE_INNER_L4_ICMP,
3684 /* IPv6 --> GTPU --> IPv6 */
3685 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3686 RTE_PTYPE_TUNNEL_GTPU |
3687 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3688 RTE_PTYPE_INNER_L4_FRAG,
3689 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3690 RTE_PTYPE_TUNNEL_GTPU |
3691 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3692 RTE_PTYPE_INNER_L4_NONFRAG,
3693 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3694 RTE_PTYPE_TUNNEL_GTPU |
3695 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3696 RTE_PTYPE_INNER_L4_UDP,
3697 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_TUNNEL_GTPU |
3699 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3700 RTE_PTYPE_INNER_L4_TCP,
3701 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3702 RTE_PTYPE_TUNNEL_GTPU |
3703 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3704 RTE_PTYPE_INNER_L4_ICMP,
3705 /* All others reserved */
3708 return type_table[ptype];
3712 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3714 struct ice_adapter *ad =
3715 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3718 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3719 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
3722 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
3723 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
3724 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3725 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3726 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3728 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
3729 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
3730 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3731 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
3732 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
3733 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
3736 * Check the programming status descriptor in the Rx queue; this is done
3737 * after a Flow Director rule has been programmed on the Tx queue.
3741 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3743 volatile union ice_32byte_rx_desc *rxdp;
3750 rxdp = (volatile union ice_32byte_rx_desc *)
3751 (&rxq->rx_ring[rxq->rx_tail]);
3752 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
3753 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
3754 >> ICE_RXD_QW1_STATUS_S;
3756 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
3758 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
3759 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
3760 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
3761 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
3763 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
3764 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
3765 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
3766 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
3770 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
3771 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
3773 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
3777 rxdp->wb.qword1.status_error_len = 0;
3779 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
3781 if (rxq->rx_tail == 0)
3782 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3784 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
3790 #define ICE_FDIR_MAX_WAIT_US 10000
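/* Program one Flow Director filter: write the filter descriptor plus a dummy
 * data descriptor to the dedicated FDIR Tx queue, bump the tail register,
 * poll up to ICE_FDIR_MAX_WAIT_US for the DD bit, and finally check the
 * programming status reported on the FDIR Rx queue.
 */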
3793 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3795 struct ice_tx_queue *txq = pf->fdir.txq;
3796 struct ice_rx_queue *rxq = pf->fdir.rxq;
3797 volatile struct ice_fltr_desc *fdirdp;
3798 volatile struct ice_tx_desc *txdp;
3802 fdirdp = (volatile struct ice_fltr_desc *)
3803 (&txq->tx_ring[txq->tx_tail]);
3804 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3805 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3807 txdp = &txq->tx_ring[txq->tx_tail + 1];
3808 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3809 td_cmd = ICE_TX_DESC_CMD_EOP |
3810 ICE_TX_DESC_CMD_RS |
3811 ICE_TX_DESC_CMD_DUMMY;
3813 txdp->cmd_type_offset_bsz =
3814 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
3817 if (txq->tx_tail >= txq->nb_tx_desc)
3819 /* Update the tx tail register */
3820 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
3821 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3822 if ((txdp->cmd_type_offset_bsz &
3823 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3824 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3828 if (i >= ICE_FDIR_MAX_WAIT_US) {
3830 "Failed to program FDIR filter: timed out waiting for DD on the Tx queue.");
3834 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
3837 ret = ice_check_fdir_programming_status(rxq);
3845 "Failed to program FDIR filter: programming status was not reported in time.");