/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_driver.h>

#include "rte_pmd_ice.h"

#define ICE_TX_CKSUM_OFFLOAD_MASK (     \
                PKT_TX_IP_CKSUM |       \
                PKT_TX_L4_MASK |        \
                PKT_TX_TCP_SEG |        \
                PKT_TX_OUTER_IP_CKSUM)

/* Offset of mbuf dynamic field for protocol extraction data */
int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;

/* Mask of mbuf dynamic flags for protocol extraction type */
uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
static inline uint8_t
ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
{
        static uint8_t rxdid_map[] = {
                [PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
                [PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
                [PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
                [PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
                [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
                [PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
                [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
        };

        return xtr_type < RTE_DIM(rxdid_map) ?
               rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
}
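/*
 * Illustrative mapping (derived from the table above): a queue configured
 * with proto_xtr == PROTO_XTR_IPV6 resolves to ICE_RXDID_COMMS_AUX_IPV6,
 * while any out-of-range extraction type falls back to the plain OVS
 * descriptor format, ICE_RXDID_COMMS_OVS.
 */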
static void
ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
                                   struct rte_mbuf *mb,
                                   volatile union ice_rx_flex_desc *rxdp)
{
        volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
                        (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        uint16_t stat_err;
#endif

        if (desc->flow_id != 0xFFFFFFFF) {
                mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
                mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        }

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        stat_err = rte_le_to_cpu_16(desc->status_error0);
        if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
                mb->ol_flags |= PKT_RX_RSS_HASH;
                mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        }
#endif
}
static void
ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
                                      struct rte_mbuf *mb,
                                      volatile union ice_rx_flex_desc *rxdp)
{
        volatile struct ice_32b_rx_flex_desc_comms *desc =
                        (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
        uint16_t stat_err;

        stat_err = rte_le_to_cpu_16(desc->status_error0);
        if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
                mb->ol_flags |= PKT_RX_RSS_HASH;
                mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        }

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        if (desc->flow_id != 0xFFFFFFFF) {
                mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
                mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        }

        if (rxq->xtr_ol_flag) {
                uint32_t metadata = 0;

                stat_err = rte_le_to_cpu_16(desc->status_error1);

                if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
                        metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);

                if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
                        metadata |=
                                rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;

                if (metadata) {
                        mb->ol_flags |= rxq->xtr_ol_flag;

                        *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
                }
        }
#endif
}
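/*
 * Layout note (v1 writeback, as handled above): the 32-bit extraction
 * metadata is assembled from two 16-bit auxiliary fields, aux0 in the low
 * half and aux1 in the high half; a resulting value of zero is treated as
 * "nothing extracted" and neither the dynamic flag nor the dynamic field
 * is set on the mbuf.
 */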
static void
ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
                                      struct rte_mbuf *mb,
                                      volatile union ice_rx_flex_desc *rxdp)
{
        volatile struct ice_32b_rx_flex_desc_comms *desc =
                        (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
        uint16_t stat_err;

        stat_err = rte_le_to_cpu_16(desc->status_error0);
        if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
                mb->ol_flags |= PKT_RX_RSS_HASH;
                mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        }

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        if (desc->flow_id != 0xFFFFFFFF) {
                mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
                mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        }

        if (rxq->xtr_ol_flag) {
                uint32_t metadata = 0;

                if (desc->flex_ts.flex.aux0 != 0xFFFF)
                        metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
                else if (desc->flex_ts.flex.aux1 != 0xFFFF)
                        metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);

                if (metadata) {
                        mb->ol_flags |= rxq->xtr_ol_flag;

                        *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
                }
        }
#endif
}
static void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
        switch (rxdid) {
        case ICE_RXDID_COMMS_AUX_VLAN:
                rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
                break;

        case ICE_RXDID_COMMS_AUX_IPV4:
                rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
                break;

        case ICE_RXDID_COMMS_AUX_IPV6:
                rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
                break;

        case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
                rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
                break;

        case ICE_RXDID_COMMS_AUX_TCP:
                rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
                break;

        case ICE_RXDID_COMMS_AUX_IP_OFFSET:
                rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
                break;

        case ICE_RXDID_COMMS_OVS:
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
                break;

        default:
                /* update this according to the RXDID for PROTO_XTR_NONE */
                rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
                break;
        }

        if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
                rxq->xtr_ol_flag = 0;
}
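/*
 * Selection sketch: ice_program_hw_rx_queue() below derives the RXDID from
 * the queue's proto_xtr devarg and then calls this helper,
 *
 *     rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
 *     ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
 *
 * so the per-queue rxd_to_pkt_fields callback always matches the
 * descriptor format the hardware was programmed with.
 */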
static enum ice_status
ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
{
        struct ice_vsi *vsi = rxq->vsi;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
        struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
        struct ice_rlan_ctx rx_ctx;
        enum ice_status err;
        uint16_t buf_size, len;
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        uint32_t rxdid = ICE_RXDID_COMMS_OVS;
        uint32_t regval;

        /* Set buffer size as the head split is disabled. */
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                              RTE_PKTMBUF_HEADROOM);
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
        len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
        rxq->max_pkt_len = RTE_MIN(len,
                                   dev->data->dev_conf.rxmode.max_rx_pkt_len);

        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
                    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must "
                                    "be larger than %u and smaller than %u, "
                                    "as jumbo frame is enabled",
                                    (uint32_t)RTE_ETHER_MAX_LEN,
                                    (uint32_t)ICE_FRAME_SIZE_MAX);
                        return -EINVAL;
                }
        } else {
                if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
                    rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
                                    (uint32_t)RTE_ETHER_MIN_LEN,
                                    (uint32_t)RTE_ETHER_MAX_LEN);
                        return -EINVAL;
                }
        }
        memset(&rx_ctx, 0, sizeof(rx_ctx));

        rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
        rx_ctx.qlen = rxq->nb_rx_desc;
        rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
        rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
        rx_ctx.dtype = 0; /* No Header Split mode */
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        rx_ctx.dsize = 1; /* 32B descriptors */
#endif
        rx_ctx.rxmax = rxq->max_pkt_len;
        /* TPH: Transaction Layer Packet (TLP) processing hints */
        rx_ctx.tphrdesc_ena = 1;
        rx_ctx.tphwdesc_ena = 1;
        rx_ctx.tphdata_ena = 1;
        rx_ctx.tphhead_ena = 1;
        /* Low Receive Queue Threshold defined in 64 descriptors units.
         * When the number of free descriptors goes below the lrxqthresh,
         * an immediate interrupt is triggered.
         */
        rx_ctx.lrxqthresh = 2;
        /* default use 32 byte descriptor, vlan tag extracted to L2TAG2 (1st) */
        rx_ctx.l2tsel = 1;
        rx_ctx.showiv = 0;
        rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;

        rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);

        PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
                    rxq->port_id, rxq->queue_id, rxdid);

        if (!(pf->supported_rxdid & BIT(rxdid))) {
                PMD_DRV_LOG(ERR, "currently the DDP package doesn't support RXDID (%u)",
                            rxdid);
                return -EINVAL;
        }

        ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
        /* Enable Flexible Descriptors in the queue context which
         * allows this driver to select a specific receive descriptor format
         */
        regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
                QRXFLXP_CNTXT_RXDID_IDX_M;

        /* increasing context priority to pick up profile ID;
         * default is 0x01; setting to 0x03 to ensure the profile
         * is programmed even if the previous context had the same priority
         */
        regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
                QRXFLXP_CNTXT_RXDID_PRIO_M;

        ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);

        err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
                            rxq->queue_id);
                return -EINVAL;
        }
        err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
                            rxq->queue_id);
                return -EINVAL;
        }

        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                              RTE_PKTMBUF_HEADROOM);

        /* Check if scattered RX needs to be used. */
        if (rxq->max_pkt_len > buf_size)
                dev->data->scattered_rx = 1;

        rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);

        /* Init the Rx tail register */
        ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

        return 0;
}
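/*
 * Worked example (illustrative, not normative): a mempool created with
 * RTE_MBUF_DEFAULT_BUF_SIZE gives rte_pktmbuf_data_room_size() == 2176,
 * so buf_size = 2176 - RTE_PKTMBUF_HEADROOM (128) = 2048, which is already
 * a multiple of the 128-byte (1 << ICE_RLAN_CTX_DBUF_S) granularity;
 * rx_buf_len therefore stays 2048 and max_pkt_len is capped at
 * min(ICE_SUPPORT_CHAIN_NUM * 2048, rxmode.max_rx_pkt_len).
 */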
/* Allocate mbufs for all descriptors in rx queue */
static int
ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
{
        struct ice_rx_entry *rxe = rxq->sw_ring;
        uint64_t dma_addr;
        uint16_t i;

        for (i = 0; i < rxq->nb_rx_desc; i++) {
                volatile union ice_rx_flex_desc *rxd;
                struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);

                if (unlikely(!mbuf)) {
                        PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
                        return -ENOMEM;
                }

                rte_mbuf_refcnt_set(mbuf, 1);
                mbuf->next = NULL;
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->nb_segs = 1;
                mbuf->port = rxq->port_id;

                dma_addr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

                rxd = &rxq->rx_ring[i];
                rxd->read.pkt_addr = dma_addr;
                rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
                rxd->read.rsvd1 = 0;
                rxd->read.rsvd2 = 0;
#endif

                rxe[i].mbuf = mbuf;
        }

        return 0;
}
/* Free all mbufs for descriptors in rx queue */
static void
_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
{
        uint16_t i;

        if (!rxq || !rxq->sw_ring) {
                PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
                return;
        }

        for (i = 0; i < rxq->nb_rx_desc; i++) {
                if (rxq->sw_ring[i].mbuf) {
                        rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
                        rxq->sw_ring[i].mbuf = NULL;
                }
        }

        if (rxq->rx_nb_avail == 0)
                return;
        for (i = 0; i < rxq->rx_nb_avail; i++)
                rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);

        rxq->rx_nb_avail = 0;
}
/* turn on or off rx queue
 * @q_idx: queue index in pf scope
 * @on: turn on or off the queue
 */
static int
ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
{
        uint32_t reg;
        uint16_t j;

        /* QRX_CTRL = QRX_ENA */
        reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));

        if (on) {
                if (reg & QRX_CTRL_QENA_STAT_M)
                        return 0; /* Already on, skip */
                reg |= QRX_CTRL_QENA_REQ_M;
        } else {
                if (!(reg & QRX_CTRL_QENA_STAT_M))
                        return 0; /* Already off, skip */
                reg &= ~QRX_CTRL_QENA_REQ_M;
        }

        /* Write the register */
        ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
        /* Check the result. It is said that QENA_STAT
         * follows the QENA_REQ by no more than 10 us.
         * TODO: need to change the wait counter later
         */
        for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
                rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
                reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
                if (on) {
                        if ((reg & QRX_CTRL_QENA_REQ_M) &&
                            (reg & QRX_CTRL_QENA_STAT_M))
                                break;
                } else {
                        if (!(reg & QRX_CTRL_QENA_REQ_M) &&
                            !(reg & QRX_CTRL_QENA_STAT_M))
                                break;
                }
        }

        /* Check if it is timeout */
        if (j >= ICE_CHK_Q_ENA_COUNT) {
                PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
                            (on ? "enable" : "disable"), q_idx);
                return -ETIMEDOUT;
        }

        return 0;
}
static int
ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
{
        int ret = ICE_SUCCESS;

        if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                             "rxq->rx_free_thresh=%d, "
                             "ICE_RX_MAX_BURST=%d",
                             rxq->rx_free_thresh, ICE_RX_MAX_BURST);
                ret = -EINVAL;
        } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                             "rxq->rx_free_thresh=%d, "
                             "rxq->nb_rx_desc=%d",
                             rxq->rx_free_thresh, rxq->nb_rx_desc);
                ret = -EINVAL;
        } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                             "rxq->nb_rx_desc=%d, "
                             "rxq->rx_free_thresh=%d",
                             rxq->nb_rx_desc, rxq->rx_free_thresh);
                ret = -EINVAL;
        }

        return ret;
}
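/*
 * Worked example (illustrative): nb_rx_desc = 1024 with rx_free_thresh = 32
 * satisfies all three preconditions above, assuming the current
 * ICE_RX_MAX_BURST of 32: 32 >= 32, 32 < 1024 and 1024 % 32 == 0, so the
 * bulk-allocation receive path may be selected for such a queue.
 */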
/* reset fields in ice_rx_queue back to default */
static void
ice_reset_rx_queue(struct ice_rx_queue *rxq)
{
        unsigned int i;
        uint16_t len;

        if (!rxq) {
                PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
                return;
        }

        len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);

        for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
                ((volatile char *)rxq->rx_ring)[i] = 0;

        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
        for (i = 0; i < ICE_RX_MAX_BURST; ++i)
                rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

        rxq->rx_nb_avail = 0;
        rxq->rx_next_avail = 0;
        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

        rxq->rx_tail = 0;
        rxq->nb_rx_hold = 0;
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;

        rxq->rxrearm_start = 0;
        rxq->rxrearm_nb = 0;
}
int
ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct ice_rx_queue *rxq;
        int err;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
                            rx_queue_id, dev->data->nb_rx_queues);
                return -EINVAL;
        }

        rxq = dev->data->rx_queues[rx_queue_id];
        if (!rxq || !rxq->q_set) {
                PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
                            rx_queue_id);
                return -EINVAL;
        }

        err = ice_program_hw_rx_queue(rxq);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to program RX queue %u",
                            rx_queue_id);
                return -EIO;
        }

        err = ice_alloc_rx_queue_mbufs(rxq);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
                return -ENOMEM;
        }

        /* Init the RX tail register. */
        ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

        err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
                            rx_queue_id);

                rxq->rx_rel_mbufs(rxq);
                ice_reset_rx_queue(rxq);
                return -EINVAL;
        }

        dev->data->rx_queue_state[rx_queue_id] =
                RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}
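/*
 * Start-sequence recap: program the queue context, populate every
 * descriptor with a fresh mbuf, write the tail register with
 * nb_rx_desc - 1 (one slot always stays empty so tail never equals head),
 * then request queue enable through QRX_CTRL and poll for QENA_STAT.
 */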
int
ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct ice_rx_queue *rxq;
        int err;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];

                err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
                if (err) {
                        PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
                                    rx_queue_id);
                        return -EINVAL;
                }
                rxq->rx_rel_mbufs(rxq);
                ice_reset_rx_queue(rxq);
                dev->data->rx_queue_state[rx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}
int
ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct ice_tx_queue *txq;
        int err;
        struct ice_vsi *vsi;
        struct ice_hw *hw;
        struct ice_aqc_add_tx_qgrp *txq_elem;
        struct ice_tlan_ctx tx_ctx;
        int buf_len;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
                            tx_queue_id, dev->data->nb_tx_queues);
                return -EINVAL;
        }

        txq = dev->data->tx_queues[tx_queue_id];
        if (!txq || !txq->q_set) {
                PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
                            tx_queue_id);
                return -EINVAL;
        }

        buf_len = ice_struct_size(txq_elem, txqs, 1);
        txq_elem = ice_malloc(hw, buf_len);
        if (!txq_elem)
                return -ENOMEM;

        vsi = txq->vsi;
        hw = ICE_VSI_TO_HW(vsi);

        memset(&tx_ctx, 0, sizeof(tx_ctx));
        txq_elem->num_txqs = 1;
        txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);

        tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
        tx_ctx.qlen = txq->nb_tx_desc;
        tx_ctx.pf_num = hw->pf_id;
        tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
        tx_ctx.src_vsi = vsi->vsi_id;
        tx_ctx.port_num = hw->port_info->lport;
        tx_ctx.tso_ena = 1; /* tso enable */
        tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
        tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */

        ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
                    ice_tlan_ctx_info);

        txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);

        /* Init the Tx tail register */
        ICE_PCI_REG_WRITE(txq->qtx_tail, 0);

        /* FIXME: we assume TC is always 0 here */
        err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
                              txq_elem, buf_len, NULL);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to add lan txq");
                rte_free(txq_elem);
                return -EIO;
        }
        /* store the schedule node id */
        txq->q_teid = txq_elem->txqs[0].q_teid;

        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        rte_free(txq_elem);
        return 0;
}
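/*
 * Programming-flow recap (as coded above): ice_set_ctx() packs tx_ctx into
 * the admin-queue buffer according to ice_tlan_ctx_info, the doorbell
 * address is cached in txq->qtx_tail and zeroed, and ice_ena_vsi_txq()
 * hands the single-queue group to firmware, which returns the Tx scheduler
 * node TEID kept in txq->q_teid for the later disable call.
 */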
static enum ice_status
ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
{
        struct ice_vsi *vsi = rxq->vsi;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint32_t rxdid = ICE_RXDID_LEGACY_1;
        struct ice_rlan_ctx rx_ctx;
        enum ice_status err;
        uint32_t regval;

        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = 1024;

        memset(&rx_ctx, 0, sizeof(rx_ctx));

        rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
        rx_ctx.qlen = rxq->nb_rx_desc;
        rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
        rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
        rx_ctx.dtype = 0; /* No Header Split mode */
        rx_ctx.dsize = 1; /* 32B descriptors */
        rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
        /* TPH: Transaction Layer Packet (TLP) processing hints */
        rx_ctx.tphrdesc_ena = 1;
        rx_ctx.tphwdesc_ena = 1;
        rx_ctx.tphdata_ena = 1;
        rx_ctx.tphhead_ena = 1;
        /* Low Receive Queue Threshold defined in 64 descriptors units.
         * When the number of free descriptors goes below the lrxqthresh,
         * an immediate interrupt is triggered.
         */
        rx_ctx.lrxqthresh = 2;
        /* default use 32 byte descriptor, vlan tag extracted to L2TAG2 (1st) */
        rx_ctx.l2tsel = 1;
        rx_ctx.showiv = 0;
        rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;

        /* Enable Flexible Descriptors in the queue context which
         * allows this driver to select a specific receive descriptor format
         */
        regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
                QRXFLXP_CNTXT_RXDID_IDX_M;

        /* increasing context priority to pick up profile ID;
         * default is 0x01; setting to 0x03 to ensure the profile
         * is programmed even if the previous context had the same priority
         */
        regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
                QRXFLXP_CNTXT_RXDID_PRIO_M;

        ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);

        err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
                            rxq->queue_id);
                return -EINVAL;
        }
        err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
                            rxq->queue_id);
                return -EINVAL;
        }

        rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);

        /* Init the Rx tail register */
        ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

        return 0;
}
int
ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct ice_rx_queue *rxq;
        int err;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        rxq = pf->fdir.rxq;
        if (!rxq || !rxq->q_set) {
                PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
                            rx_queue_id);
                return -EINVAL;
        }

        err = ice_fdir_program_hw_rx_queue(rxq);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
                            rx_queue_id);
                return -EIO;
        }

        /* Init the RX tail register. */
        ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

        err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
                            rx_queue_id);

                ice_reset_rx_queue(rxq);
                return -EINVAL;
        }

        return 0;
}
int
ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_tx_queue *txq;
        int err;
        struct ice_vsi *vsi;
        struct ice_hw *hw;
        struct ice_aqc_add_tx_qgrp *txq_elem;
        struct ice_tlan_ctx tx_ctx;
        int buf_len;

        PMD_INIT_FUNC_TRACE();

        txq = pf->fdir.txq;
        if (!txq || !txq->q_set) {
                PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
                            tx_queue_id);
                return -EINVAL;
        }

        buf_len = ice_struct_size(txq_elem, txqs, 1);
        txq_elem = ice_malloc(hw, buf_len);
        if (!txq_elem)
                return -ENOMEM;

        vsi = txq->vsi;
        hw = ICE_VSI_TO_HW(vsi);

        memset(&tx_ctx, 0, sizeof(tx_ctx));
        txq_elem->num_txqs = 1;
        txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);

        tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
        tx_ctx.qlen = txq->nb_tx_desc;
        tx_ctx.pf_num = hw->pf_id;
        tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
        tx_ctx.src_vsi = vsi->vsi_id;
        tx_ctx.port_num = hw->port_info->lport;
        tx_ctx.tso_ena = 1; /* tso enable */
        tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
        tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */

        ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
                    ice_tlan_ctx_info);

        txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);

        /* Init the Tx tail register */
        ICE_PCI_REG_WRITE(txq->qtx_tail, 0);

        /* FIXME: we assume TC is always 0 here */
        err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
                              txq_elem, buf_len, NULL);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
                rte_free(txq_elem);
                return -EIO;
        }
        /* store the schedule node id */
        txq->q_teid = txq_elem->txqs[0].q_teid;

        rte_free(txq_elem);
        return 0;
}
/* Free all mbufs for descriptors in tx queue */
static void
_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
{
        uint16_t i;

        if (!txq || !txq->sw_ring) {
                PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
                return;
        }

        for (i = 0; i < txq->nb_tx_desc; i++) {
                if (txq->sw_ring[i].mbuf) {
                        rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                        txq->sw_ring[i].mbuf = NULL;
                }
        }
}
static void
ice_reset_tx_queue(struct ice_tx_queue *txq)
{
        struct ice_tx_entry *txe;
        uint16_t i, prev, size;

        if (!txq) {
                PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
                return;
        }

        txe = txq->sw_ring;
        size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
        for (i = 0; i < size; i++)
                ((volatile char *)txq->tx_ring)[i] = 0;

        prev = (uint16_t)(txq->nb_tx_desc - 1);
        for (i = 0; i < txq->nb_tx_desc; i++) {
                volatile struct ice_tx_desc *txd = &txq->tx_ring[i];

                txd->cmd_type_offset_bsz =
                        rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
                txe[i].mbuf = NULL;
                txe[i].last_id = i;
                txe[prev].next_id = i;
                prev = i;
        }

        txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
        txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

        txq->tx_tail = 0;
        txq->nb_tx_used = 0;

        txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
}
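/*
 * Note: nb_tx_free is deliberately nb_tx_desc - 1, not nb_tx_desc; one
 * descriptor is kept back as a sentinel (see the threshold comment in
 * ice_tx_queue_setup()) so the tail never catches up with the hardware
 * head from behind.
 */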
int
ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct ice_tx_queue *txq;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        enum ice_status status;
        uint16_t q_ids[1];
        uint32_t q_teids[1];
        uint16_t q_handle = tx_queue_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
                            tx_queue_id, dev->data->nb_tx_queues);
                return -EINVAL;
        }

        txq = dev->data->tx_queues[tx_queue_id];
        if (!txq) {
                PMD_DRV_LOG(ERR, "TX queue %u is not available",
                            tx_queue_id);
                return -EINVAL;
        }

        q_ids[0] = txq->reg_idx;
        q_teids[0] = txq->q_teid;

        /* FIXME: we assume TC is always 0 here */
        status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
                                 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
        if (status != ICE_SUCCESS) {
                PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
                return -EINVAL;
        }

        txq->tx_rel_mbufs(txq);
        ice_reset_tx_queue(txq);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}
int
ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct ice_rx_queue *rxq;
        int err;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        rxq = pf->fdir.rxq;

        err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
                            rx_queue_id);
                return -EINVAL;
        }
        rxq->rx_rel_mbufs(rxq);
        ice_reset_rx_queue(rxq);

        return 0;
}
int
ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct ice_tx_queue *txq;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        enum ice_status status;
        uint16_t q_ids[1];
        uint32_t q_teids[1];
        uint16_t q_handle = tx_queue_id;

        txq = pf->fdir.txq;
        if (!txq) {
                PMD_DRV_LOG(ERR, "TX queue %u is not available",
                            tx_queue_id);
                return -EINVAL;
        }

        q_ids[0] = txq->reg_idx;
        q_teids[0] = txq->q_teid;

        /* FIXME: we assume TC is always 0 here */
        status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
                                 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
        if (status != ICE_SUCCESS) {
                PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
                return -EINVAL;
        }

        txq->tx_rel_mbufs(txq);

        return 0;
}
int
ice_rx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t queue_idx,
                   uint16_t nb_desc,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf,
                   struct rte_mempool *mp)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        struct ice_rx_queue *rxq;
        const struct rte_memzone *rz;
        uint32_t ring_size;
        uint16_t len;
        int use_def_burst_func = 1;

        if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
            nb_desc > ICE_MAX_RING_DESC ||
            nb_desc < ICE_MIN_RING_DESC) {
                PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
                             "invalid", nb_desc);
                return -EINVAL;
        }

        /* Free memory if needed */
        if (dev->data->rx_queues[queue_idx]) {
                ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;
        }

        /* Allocate the rx queue data structure */
        rxq = rte_zmalloc_socket(NULL,
                                 sizeof(struct ice_rx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (!rxq) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for "
                             "rx queue data structure");
                return -ENOMEM;
        }

        rxq->mp = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;

        rxq->reg_idx = vsi->base_queue + queue_idx;
        rxq->port_id = dev->data->port_id;
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;

        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->vsi = vsi;
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
        rxq->proto_xtr = pf->proto_xtr != NULL ?
                         pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;

        /* Allocate the maximum number of RX ring hardware descriptors. */
        len = ICE_MAX_RING_DESC;

        /**
         * Allocating a little more memory because vectorized/bulk_alloc Rx
         * functions don't check boundaries each time.
         */
        len += ICE_RX_MAX_BURST;

        /* The ring is sized for the maximum descriptor count plus slack. */
        ring_size = sizeof(union ice_rx_flex_desc) * len;
        ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
        rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
                                      ring_size, ICE_RING_BASE_ALIGN,
                                      socket_id);
        if (!rz) {
                ice_rx_queue_release(rxq);
                PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
                return -ENOMEM;
        }

        /* Zero all the descriptors in the ring. */
        memset(rz->addr, 0, ring_size);

        rxq->rx_ring_dma = rz->iova;
        rxq->rx_ring = rz->addr;

        /* always reserve more for bulk alloc */
        len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);

        /* Allocate the software ring. */
        rxq->sw_ring = rte_zmalloc_socket(NULL,
                                          sizeof(struct ice_rx_entry) * len,
                                          RTE_CACHE_LINE_SIZE,
                                          socket_id);
        if (!rxq->sw_ring) {
                ice_rx_queue_release(rxq);
                PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
                return -ENOMEM;
        }

        ice_reset_rx_queue(rxq);
        rxq->q_set = true;
        dev->data->rx_queues[queue_idx] = rxq;
        rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;

        use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);

        if (!use_def_burst_func) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
                             "satisfied. Rx Burst Bulk Alloc function will be "
                             "used on port=%d, queue=%d.",
                             rxq->port_id, rxq->queue_id);
        } else {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
                             "not satisfied, Scattered Rx is requested "
                             "on port=%d, queue=%d.",
                             rxq->port_id, rxq->queue_id);
                ad->rx_bulk_alloc_allowed = false;
        }

        return 0;
}
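/*
 * Usage sketch (application side, illustrative only): a typical setup call
 * reaching this function through the ethdev ops table would be
 *
 *     ret = rte_eth_rx_queue_setup(port_id, queue, 1024,
 *                                  rte_eth_dev_socket_id(port_id),
 *                                  NULL, mbuf_pool);
 *
 * where a NULL rx_conf means the defaults this driver reports in
 * rte_eth_dev_info are applied before the call lands here.
 */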
void
ice_rx_queue_release(void *rxq)
{
        struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;

        if (!q) {
                PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
                return;
        }

        q->rx_rel_mbufs(q);
        rte_free(q->sw_ring);
        rte_free(q);
}
int
ice_tx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t queue_idx,
                   uint16_t nb_desc,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
        struct ice_tx_queue *txq;
        const struct rte_memzone *tz;
        uint32_t ring_size;
        uint16_t tx_rs_thresh, tx_free_thresh;
        uint64_t offloads;

        offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;

        if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
            nb_desc > ICE_MAX_RING_DESC ||
            nb_desc < ICE_MIN_RING_DESC) {
                PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
                             "invalid", nb_desc);
                return -EINVAL;
        }

        /**
         * The following two parameters control the setting of the RS bit on
         * transmit descriptors. TX descriptors will have their RS bit set
         * after txq->tx_rs_thresh descriptors have been used. The TX
         * descriptor ring will be cleaned after txq->tx_free_thresh
         * descriptors are used or if the number of descriptors required to
         * transmit a packet is greater than the number of free TX descriptors.
         *
         * The following constraints must be satisfied:
         *  - tx_rs_thresh must be greater than 0.
         *  - tx_rs_thresh must be less than the size of the ring minus 2.
         *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
         *  - tx_rs_thresh must be a divisor of the ring size.
         *  - tx_free_thresh must be greater than 0.
         *  - tx_free_thresh must be less than the size of the ring minus 3.
         *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
         *
         * One descriptor in the TX ring is used as a sentinel to avoid a H/W
         * race condition, hence the maximum threshold constraints. When set
         * to zero use default values.
         */
        tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
                                    tx_conf->tx_free_thresh :
                                    ICE_DEFAULT_TX_FREE_THRESH);
        /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
        tx_rs_thresh =
                (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
                nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
        if (tx_conf->tx_rs_thresh)
                tx_rs_thresh = tx_conf->tx_rs_thresh;
        if (tx_rs_thresh + tx_free_thresh > nb_desc) {
                PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
                             "exceed nb_desc. (tx_rs_thresh=%u "
                             "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
                             (unsigned int)tx_rs_thresh,
                             (unsigned int)tx_free_thresh,
                             (unsigned int)nb_desc,
                             (int)dev->data->port_id,
                             (int)queue_idx);
                return -EINVAL;
        }
        if (tx_rs_thresh >= (nb_desc - 2)) {
                PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
                             "number of TX descriptors minus 2. "
                             "(tx_rs_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_rs_thresh,
                             (int)dev->data->port_id,
                             (int)queue_idx);
                return -EINVAL;
        }
        if (tx_free_thresh >= (nb_desc - 3)) {
                PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
                             "number of TX descriptors minus 3. "
                             "(tx_free_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
                             (int)dev->data->port_id,
                             (int)queue_idx);
                return -EINVAL;
        }
        if (tx_rs_thresh > tx_free_thresh) {
                PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
                             "equal to tx_free_thresh. (tx_free_thresh=%u"
                             " tx_rs_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
                             (unsigned int)tx_rs_thresh,
                             (int)dev->data->port_id,
                             (int)queue_idx);
                return -EINVAL;
        }
        if ((nb_desc % tx_rs_thresh) != 0) {
                PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
                             "number of TX descriptors. (tx_rs_thresh=%u"
                             " port=%d queue=%d)",
                             (unsigned int)tx_rs_thresh,
                             (int)dev->data->port_id,
                             (int)queue_idx);
                return -EINVAL;
        }
        if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
                PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
                             "tx_rs_thresh is greater than 1. "
                             "(tx_rs_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_rs_thresh,
                             (int)dev->data->port_id,
                             (int)queue_idx);
                return -EINVAL;
        }
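/*
 * Worked example (illustrative; the defaults are 32/32 at the time of
 * writing): nb_desc = 1024 with tx_free_thresh = 32 and tx_rs_thresh = 32
 * passes every check above: 32 + 32 <= 1024, 32 < 1022, 32 < 1021,
 * 32 <= 32 and 1024 % 32 == 0.
 */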
        /* Free memory if needed. */
        if (dev->data->tx_queues[queue_idx]) {
                ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }

        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket(NULL,
                                 sizeof(struct ice_tx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (!txq) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for "
                             "tx queue structure");
                return -ENOMEM;
        }

        /* Allocate TX hardware ring descriptors. */
        ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
        ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                                      ring_size, ICE_RING_BASE_ALIGN,
                                      socket_id);
        if (!tz) {
                ice_tx_queue_release(txq);
                PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
                return -ENOMEM;
        }

        txq->nb_tx_desc = nb_desc;
        txq->tx_rs_thresh = tx_rs_thresh;
        txq->tx_free_thresh = tx_free_thresh;
        txq->pthresh = tx_conf->tx_thresh.pthresh;
        txq->hthresh = tx_conf->tx_thresh.hthresh;
        txq->wthresh = tx_conf->tx_thresh.wthresh;
        txq->queue_id = queue_idx;

        txq->reg_idx = vsi->base_queue + queue_idx;
        txq->port_id = dev->data->port_id;
        txq->offloads = offloads;
        txq->vsi = vsi;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;

        txq->tx_ring_dma = tz->iova;
        txq->tx_ring = tz->addr;

        /* Allocate software ring */
        txq->sw_ring =
                rte_zmalloc_socket(NULL,
                                   sizeof(struct ice_tx_entry) * nb_desc,
                                   RTE_CACHE_LINE_SIZE,
                                   socket_id);
        if (!txq->sw_ring) {
                ice_tx_queue_release(txq);
                PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
                return -ENOMEM;
        }

        ice_reset_tx_queue(txq);
        txq->q_set = true;
        dev->data->tx_queues[queue_idx] = txq;
        txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
        ice_set_tx_function_flag(dev, txq);

        return 0;
}
void
ice_tx_queue_release(void *txq)
{
        struct ice_tx_queue *q = (struct ice_tx_queue *)txq;

        if (!q) {
                PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
                return;
        }

        q->tx_rel_mbufs(q);
        rte_free(q->sw_ring);
        rte_free(q);
}
void
ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                 struct rte_eth_rxq_info *qinfo)
{
        struct ice_rx_queue *rxq;

        rxq = dev->data->rx_queues[queue_id];

        qinfo->mp = rxq->mp;
        qinfo->scattered_rx = dev->data->scattered_rx;
        qinfo->nb_desc = rxq->nb_rx_desc;

        qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
        qinfo->conf.rx_drop_en = rxq->drop_en;
        qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}
void
ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                 struct rte_eth_txq_info *qinfo)
{
        struct ice_tx_queue *txq;

        txq = dev->data->tx_queues[queue_id];

        qinfo->nb_desc = txq->nb_tx_desc;

        qinfo->conf.tx_thresh.pthresh = txq->pthresh;
        qinfo->conf.tx_thresh.hthresh = txq->hthresh;
        qinfo->conf.tx_thresh.wthresh = txq->wthresh;

        qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
        qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
        qinfo->conf.offloads = txq->offloads;
        qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
uint32_t
ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define ICE_RXQ_SCAN_INTERVAL 4
        volatile union ice_rx_flex_desc *rxdp;
        struct ice_rx_queue *rxq;
        uint16_t desc = 0;

        rxq = dev->data->rx_queues[rx_queue_id];
        rxdp = &rxq->rx_ring[rxq->rx_tail];
        while ((desc < rxq->nb_rx_desc) &&
               rte_le_to_cpu_16(rxdp->wb.status_error0) &
               (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
                /**
                 * Check the DD bit of a rx descriptor of each 4 in a group,
                 * to avoid checking too frequently and downgrading performance
                 * too much.
                 */
                desc += ICE_RXQ_SCAN_INTERVAL;
                rxdp += ICE_RXQ_SCAN_INTERVAL;
                if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
                        rxdp = &(rxq->rx_ring[rxq->rx_tail +
                                              desc - rxq->nb_rx_desc]);
        }

        return desc;
}
#define ICE_RX_FLEX_ERR0_BITS   \
        ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) |        \
         (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |   \
         (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |   \
         (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |  \
         (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
         (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
/* Rx L3/L4 checksum */
static inline uint64_t
ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
{
        uint64_t flags = 0;

        /* check if HW has decoded the packet and checksum */
        if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
                return 0;

        if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
                flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
                return flags;
        }

        if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
                flags |= PKT_RX_IP_CKSUM_BAD;
        else
                flags |= PKT_RX_IP_CKSUM_GOOD;

        if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
                flags |= PKT_RX_L4_CKSUM_BAD;
        else
                flags |= PKT_RX_L4_CKSUM_GOOD;

        if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
                flags |= PKT_RX_EIP_CKSUM_BAD;

        return flags;
}
static inline void
ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
{
        if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
            (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
                mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                mb->vlan_tci =
                        rte_le_to_cpu_16(rxdp->wb.l2tag1);
                PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
                           rte_le_to_cpu_16(rxdp->wb.l2tag1));
        } else {
                mb->vlan_tci = 0;
        }

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
        if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
            (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
                mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
                                PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
                mb->vlan_tci_outer = mb->vlan_tci;
                mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
                PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
                           rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
                           rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
        } else {
                mb->vlan_tci_outer = 0;
        }
#endif
        PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
                   mb->vlan_tci, mb->vlan_tci_outer);
}
#define ICE_LOOK_AHEAD 8
#if (ICE_LOOK_AHEAD != 8)
#error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
#endif

static inline int
ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
{
        volatile union ice_rx_flex_desc *rxdp;
        struct ice_rx_entry *rxep;
        struct rte_mbuf *mb;
        uint16_t stat_err0;
        uint16_t pkt_len;
        int32_t s[ICE_LOOK_AHEAD], nb_dd;
        int32_t i, j, nb_rx = 0;
        uint64_t pkt_flags = 0;
        uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

        rxdp = &rxq->rx_ring[rxq->rx_tail];
        rxep = &rxq->sw_ring[rxq->rx_tail];

        stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

        /* Make sure there is at least 1 packet to receive */
        if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
                return 0;

        /**
         * Scan LOOK_AHEAD descriptors at a time to determine which
         * descriptors reference packets that are ready to be received.
         */
        for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
             rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
                /* Read desc statuses backwards to avoid race condition */
                for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
                        s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);

                rte_smp_rmb();

                /* Compute how many status bits were set */
                for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
                        nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);

                nb_rx += nb_dd;

                /* Translate descriptor info to mbuf parameters */
                for (j = 0; j < nb_dd; j++) {
                        mb = rxep[j].mbuf;
                        pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
                                   ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
                        mb->data_len = pkt_len;
                        mb->pkt_len = pkt_len;
                        mb->ol_flags = 0;
                        stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
                        pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
                        mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
                                rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
                        ice_rxd_to_vlan_tci(mb, &rxdp[j]);
                        rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);

                        mb->ol_flags |= pkt_flags;
                }

                for (j = 0; j < ICE_LOOK_AHEAD; j++)
                        rxq->rx_stage[i + j] = rxep[j].mbuf;

                if (nb_dd != ICE_LOOK_AHEAD)
                        break;
        }

        /* Clear software ring entries */
        for (i = 0; i < nb_rx; i++)
                rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;

        PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
                   "port_id=%u, queue_id=%u, nb_rx=%d",
                   rxq->port_id, rxq->queue_id, nb_rx);

        return nb_rx;
}
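/*
 * A note on the backward status read above (an interpretation, not taken
 * from a datasheet): scanning s[ICE_LOOK_AHEAD - 1] down to s[0] means
 * that if descriptor j is observed as done, all earlier descriptors in the
 * group were written back no later than it was read, so the DD count
 * cannot over-report ready packets while hardware is still writing the
 * group.
 */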
static inline uint16_t
ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
                       struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts)
{
        uint16_t i;
        struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];

        nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);

        for (i = 0; i < nb_pkts; i++)
                rx_pkts[i] = stage[i];

        rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
        rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);

        return nb_pkts;
}
static inline int
ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
{
        volatile union ice_rx_flex_desc *rxdp;
        struct ice_rx_entry *rxep;
        struct rte_mbuf *mb;
        uint16_t alloc_idx, i;
        uint64_t dma_addr;
        int diag;

        /* Allocate buffers in bulk */
        alloc_idx = (uint16_t)(rxq->rx_free_trigger -
                               (rxq->rx_free_thresh - 1));
        rxep = &rxq->sw_ring[alloc_idx];
        diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
                                    rxq->rx_free_thresh);
        if (unlikely(diag != 0)) {
                PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
                return -ENOMEM;
        }

        rxdp = &rxq->rx_ring[alloc_idx];
        for (i = 0; i < rxq->rx_free_thresh; i++) {
                if (likely(i < (rxq->rx_free_thresh - 1)))
                        /* Prefetch next mbuf */
                        rte_prefetch0(rxep[i + 1].mbuf);

                mb = rxep[i].mbuf;
                rte_mbuf_refcnt_set(mb, 1);
                mb->next = NULL;
                mb->data_off = RTE_PKTMBUF_HEADROOM;
                mb->nb_segs = 1;
                mb->port = rxq->port_id;
                dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
                rxdp[i].read.hdr_addr = 0;
                rxdp[i].read.pkt_addr = dma_addr;
        }

        /* Update Rx tail register */
        ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);

        rxq->rx_free_trigger =
                (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
        if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
                rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

        return 0;
}
static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
        uint16_t nb_rx = 0;
        struct rte_eth_dev *dev;

        if (!nb_pkts)
                return 0;

        if (rxq->rx_nb_avail)
                return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

        nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
        rxq->rx_next_avail = 0;
        rxq->rx_nb_avail = nb_rx;
        rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);

        if (rxq->rx_tail > rxq->rx_free_trigger) {
                if (ice_rx_alloc_bufs(rxq) != 0) {
                        uint16_t i, j;

                        dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
                        dev->data->rx_mbuf_alloc_failed +=
                                rxq->rx_free_thresh;
                        PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
                                   "port_id=%u, queue_id=%u",
                                   rxq->port_id, rxq->queue_id);
                        rxq->rx_nb_avail = 0;
                        rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
                        for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
                                rxq->sw_ring[j].mbuf = rxq->rx_stage[i];

                        return 0;
                }
        }

        if (rxq->rx_tail >= rxq->nb_rx_desc)
                rxq->rx_tail = 0;

        if (rxq->rx_nb_avail)
                return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

        return 0;
}
static uint16_t
ice_recv_pkts_bulk_alloc(void *rx_queue,
                         struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
        uint16_t nb_rx = 0;
        uint16_t n;
        uint16_t count;

        if (unlikely(nb_pkts == 0))
                return nb_rx;

        if (likely(nb_pkts <= ICE_RX_MAX_BURST))
                return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);

        while (nb_pkts) {
                n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
                count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
                nb_rx = (uint16_t)(nb_rx + count);
                nb_pkts = (uint16_t)(nb_pkts - count);
                if (count < n)
                        break;
        }

        return nb_rx;
}
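/*
 * Note: the bulk-alloc receive path never pulls more than ICE_RX_MAX_BURST
 * packets from the ring in one scan; larger application bursts are split
 * into ICE_RX_MAX_BURST-sized chunks above and served from the staging
 * array filled by ice_rx_scan_hw_ring().
 */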
static uint16_t
ice_recv_scattered_pkts(void *rx_queue,
                        struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct ice_rx_queue *rxq = rx_queue;
        volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
        volatile union ice_rx_flex_desc *rxdp;
        union ice_rx_flex_desc rxd;
        struct ice_rx_entry *sw_ring = rxq->sw_ring;
        struct ice_rx_entry *rxe;
        struct rte_mbuf *first_seg = rxq->pkt_first_seg;
        struct rte_mbuf *last_seg = rxq->pkt_last_seg;
        struct rte_mbuf *nmb; /* new allocated mbuf */
        struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
        uint16_t rx_id = rxq->rx_tail;
        uint16_t nb_rx = 0;
        uint16_t nb_hold = 0;
        uint16_t rx_packet_len;
        uint16_t rx_stat_err0;
        uint64_t dma_addr;
        uint64_t pkt_flags;
        uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
        struct rte_eth_dev *dev;

        while (nb_rx < nb_pkts) {
                rxdp = &rx_ring[rx_id];
                rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

                /* Check the DD bit first */
                if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
                        break;

                /* allocate mbuf */
                nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!nmb)) {
                        dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
                        dev->data->rx_mbuf_alloc_failed++;
                        break;
                }
                rxd = *rxdp; /* copy descriptor in ring to temp variable */

                nb_hold++;
                rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
                rx_id++;
                if (unlikely(rx_id == rxq->nb_rx_desc))
                        rx_id = 0;

                /* Prefetch next mbuf */
                rte_prefetch0(sw_ring[rx_id].mbuf);

                /**
                 * When next RX descriptor is on a cache line boundary,
                 * prefetch the next 4 RX descriptors and next 8 pointers
                 * to mbufs.
                 */
                if ((rx_id & 0x3) == 0) {
                        rte_prefetch0(&rx_ring[rx_id]);
                        rte_prefetch0(&sw_ring[rx_id]);
                }

                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));

                /* Set data buffer address and data length of the mbuf */
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
                rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
                                ICE_RX_FLX_DESC_PKT_LEN_M;
                rxm->data_len = rx_packet_len;
                rxm->data_off = RTE_PKTMBUF_HEADROOM;

                /**
                 * If this is the first buffer of the received packet, set the
                 * pointer to the first mbuf of the packet and initialize its
                 * context. Otherwise, update the total length and the number
                 * of segments of the current scattered packet, and update the
                 * pointer to the last mbuf of the current packet.
                 */
                if (!first_seg) {
                        first_seg = rxm;
                        first_seg->nb_segs = 1;
                        first_seg->pkt_len = rx_packet_len;
                } else {
                        first_seg->pkt_len =
                                (uint16_t)(first_seg->pkt_len +
                                           rx_packet_len);
                        first_seg->nb_segs++;
                        last_seg->next = rxm;
                }

                /**
                 * If this is not the last buffer of the received packet,
                 * update the pointer to the last mbuf of the current scattered
                 * packet and continue to parse the RX ring.
                 */
                if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
                        last_seg = rxm;
                        continue;
                }

                /**
                 * This is the last buffer of the received packet. If the CRC
                 * is not stripped by the hardware:
                 *  - Subtract the CRC length from the total packet length.
                 *  - If the last buffer only contains the whole CRC or a part
                 *  of it, free the mbuf associated to the last buffer. If part
                 *  of the CRC is also contained in the previous mbuf, subtract
                 *  the length of that CRC part from the data length of the
                 *  previous mbuf.
                 */
                rxm->next = NULL;
                if (unlikely(rxq->crc_len > 0)) {
                        first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
                        if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
                                rte_pktmbuf_free_seg(rxm);
                                first_seg->nb_segs--;
                                last_seg->data_len =
                                        (uint16_t)(last_seg->data_len -
                                        (RTE_ETHER_CRC_LEN - rx_packet_len));
                                last_seg->next = NULL;
                        } else {
                                rxm->data_len = (uint16_t)(rx_packet_len -
                                                           RTE_ETHER_CRC_LEN);
                        }
                }

                first_seg->port = rxq->port_id;
                first_seg->ol_flags = 0;
                first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
                        rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
                ice_rxd_to_vlan_tci(first_seg, &rxd);
                rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
                pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
                first_seg->ol_flags |= pkt_flags;
                /* Prefetch data of first segment, if configured to do so. */
                rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
                                          first_seg->data_off));
                rx_pkts[nb_rx++] = first_seg;
                first_seg = NULL;
        }

        /* Record index of the next RX descriptor to probe. */
        rxq->rx_tail = rx_id;
        rxq->pkt_first_seg = first_seg;
        rxq->pkt_last_seg = last_seg;

        /**
         * If the number of free RX descriptors is greater than the RX free
         * threshold of the queue, advance the Receive Descriptor Tail (RDT)
         * register. Update the RDT with the value of the last processed RX
         * descriptor minus 1, to guarantee that the RDT register is never
         * equal to the RDH register, which creates a "full" ring situation
         * from the hardware point of view.
         */
        nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
        if (nb_hold > rxq->rx_free_thresh) {
                rx_id = (uint16_t)(rx_id == 0 ?
                                   (rxq->nb_rx_desc - 1) : (rx_id - 1));
                /* write TAIL register */
                ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;

        /* return received packet in the burst */
        return nb_rx;
}
const uint32_t *
ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        const uint32_t *ptypes;

        static const uint32_t ptypes_os[] = {
                /* refers to ice_get_default_pkt_type() */
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_TIMESYNC,
                RTE_PTYPE_L2_ETHER_LLDP,
                RTE_PTYPE_L2_ETHER_ARP,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_L4_NONFRAG,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_TUNNEL_GRENAT,
                RTE_PTYPE_TUNNEL_IP,
                RTE_PTYPE_INNER_L2_ETHER,
                RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L4_FRAG,
                RTE_PTYPE_INNER_L4_ICMP,
                RTE_PTYPE_INNER_L4_NONFRAG,
                RTE_PTYPE_INNER_L4_SCTP,
                RTE_PTYPE_INNER_L4_TCP,
                RTE_PTYPE_INNER_L4_UDP,
                RTE_PTYPE_UNKNOWN
        };

        static const uint32_t ptypes_comms[] = {
                /* refers to ice_get_default_pkt_type() */
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_TIMESYNC,
                RTE_PTYPE_L2_ETHER_LLDP,
                RTE_PTYPE_L2_ETHER_ARP,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_L4_NONFRAG,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_TUNNEL_GRENAT,
                RTE_PTYPE_TUNNEL_IP,
                RTE_PTYPE_INNER_L2_ETHER,
                RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_INNER_L4_FRAG,
                RTE_PTYPE_INNER_L4_ICMP,
                RTE_PTYPE_INNER_L4_NONFRAG,
                RTE_PTYPE_INNER_L4_SCTP,
                RTE_PTYPE_INNER_L4_TCP,
                RTE_PTYPE_INNER_L4_UDP,
                RTE_PTYPE_TUNNEL_GTPC,
                RTE_PTYPE_TUNNEL_GTPU,
                RTE_PTYPE_L2_ETHER_PPPOE,
                RTE_PTYPE_UNKNOWN
        };

        if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
                ptypes = ptypes_comms;
        else
                ptypes = ptypes_os;

        if (dev->rx_pkt_burst == ice_recv_pkts ||
            dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
            dev->rx_pkt_burst == ice_recv_scattered_pkts)
                return ptypes;

#ifdef RTE_ARCH_X86
        if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
            dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
            dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
            dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
                return ptypes;
#endif

        return NULL;
}
int
ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
        volatile union ice_rx_flex_desc *rxdp;
        struct ice_rx_queue *rxq = rx_queue;
        uint32_t desc;

        if (unlikely(offset >= rxq->nb_rx_desc))
                return -EINVAL;

        if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
                return RTE_ETH_RX_DESC_UNAVAIL;

        desc = rxq->rx_tail + offset;
        if (desc >= rxq->nb_rx_desc)
                desc -= rxq->nb_rx_desc;

        rxdp = &rxq->rx_ring[desc];
        if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
            (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
                return RTE_ETH_RX_DESC_DONE;

        return RTE_ETH_RX_DESC_AVAIL;
}
int
ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
        struct ice_tx_queue *txq = tx_queue;
        volatile uint64_t *status;
        uint64_t mask, expect;
        uint32_t desc;

        if (unlikely(offset >= txq->nb_tx_desc))
                return -EINVAL;

        desc = txq->tx_tail + offset;
        /* go to next desc that has the RS bit */
        desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
                txq->tx_rs_thresh;
        if (desc >= txq->nb_tx_desc) {
                desc -= txq->nb_tx_desc;
                if (desc >= txq->nb_tx_desc)
                        desc -= txq->nb_tx_desc;
        }

        status = &txq->tx_ring[desc].cmd_type_offset_bsz;
        mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
        expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
                                  ICE_TXD_QW1_DTYPE_S);
        if ((*status & mask) == expect)
                return RTE_ETH_TX_DESC_DONE;

        return RTE_ETH_TX_DESC_FULL;
}
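/*
 * Worked example (values chosen for illustration): with tx_rs_thresh = 32,
 * tx_tail = 100 and offset = 10, desc = 110 is rounded up to 128, the next
 * descriptor that carries an RS bit; its DTYPE field is then compared
 * against ICE_TX_DESC_DTYPE_DESC_DONE to decide whether the whole batch
 * preceding it has completed.
 */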
void
ice_free_queues(struct rte_eth_dev *dev)
{
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!dev->data->rx_queues[i])
                        continue;
                ice_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (!dev->data->tx_queues[i])
                        continue;
                ice_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
        dev->data->nb_tx_queues = 0;
}
#define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
#define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC

int
ice_fdir_setup_tx_resources(struct ice_pf *pf)
{
        struct ice_tx_queue *txq;
        const struct rte_memzone *tz = NULL;
        uint32_t ring_size;
        struct rte_eth_dev *dev;

        if (!pf) {
                PMD_DRV_LOG(ERR, "PF is not available");
                return -EINVAL;
        }

        dev = pf->adapter->eth_dev;

        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("ice fdir tx queue",
                                 sizeof(struct ice_tx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 SOCKET_ID_ANY);
        if (!txq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                            "tx queue structure.");
                return -ENOMEM;
        }

        /* Allocate TX hardware ring descriptors. */
        ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
        ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);

        tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
                                      ICE_FDIR_QUEUE_ID, ring_size,
                                      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!tz) {
                ice_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
                return -ENOMEM;
        }

        txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
        txq->queue_id = ICE_FDIR_QUEUE_ID;
        txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
        txq->vsi = pf->fdir.fdir_vsi;

        txq->tx_ring_dma = tz->iova;
        txq->tx_ring = (struct ice_tx_desc *)tz->addr;
        /*
         * There is no need to allocate a software ring or to reset the queue
         * for the FDIR programming queue; just mark the queue as configured.
         */
        txq->q_set = true;
        pf->fdir.txq = txq;

        txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;

        return ICE_SUCCESS;
}
int
ice_fdir_setup_rx_resources(struct ice_pf *pf)
{
        struct ice_rx_queue *rxq;
        const struct rte_memzone *rz = NULL;
        uint32_t ring_size;
        struct rte_eth_dev *dev;

        if (!pf) {
                PMD_DRV_LOG(ERR, "PF is not available");
                return -EINVAL;
        }

        dev = pf->adapter->eth_dev;

        /* Allocate the RX queue data structure. */
        rxq = rte_zmalloc_socket("ice fdir rx queue",
                                 sizeof(struct ice_rx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 SOCKET_ID_ANY);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                            "rx queue structure.");
                return -ENOMEM;
        }

        /* Allocate RX hardware ring descriptors. */
        ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
        ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);

        rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
                                      ICE_FDIR_QUEUE_ID, ring_size,
                                      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!rz) {
                ice_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
                return -ENOMEM;
        }

        rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
        rxq->queue_id = ICE_FDIR_QUEUE_ID;
        rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
        rxq->vsi = pf->fdir.fdir_vsi;

        rxq->rx_ring_dma = rz->iova;
        memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
               sizeof(union ice_32byte_rx_desc));
        rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;

        /*
         * There is no need to allocate a software ring or to reset the queue
         * for the FDIR rx queue; just mark the queue as configured.
         */
        rxq->q_set = true;
        pf->fdir.rxq = rxq;

        rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;

        return ICE_SUCCESS;
}
2146 ice_recv_pkts(void *rx_queue,
2147 struct rte_mbuf **rx_pkts,
2150 struct ice_rx_queue *rxq = rx_queue;
2151 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2152 volatile union ice_rx_flex_desc *rxdp;
2153 union ice_rx_flex_desc rxd;
2154 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2155 struct ice_rx_entry *rxe;
2156 struct rte_mbuf *nmb; /* new allocated mbuf */
2157 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2158 uint16_t rx_id = rxq->rx_tail;
2160 uint16_t nb_hold = 0;
2161 uint16_t rx_packet_len;
2162 uint16_t rx_stat_err0;
2165 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2166 struct rte_eth_dev *dev;
2168 while (nb_rx < nb_pkts) {
2169 rxdp = &rx_ring[rx_id];
2170 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2172 /* Check the DD bit first */
2173 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2177 nmb = rte_mbuf_raw_alloc(rxq->mp);
2178 if (unlikely(!nmb)) {
2179 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2180 dev->data->rx_mbuf_alloc_failed++;
2183 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2186 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2188 if (unlikely(rx_id == rxq->nb_rx_desc))
2193 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2196 * fill the read format of descriptor with physic address in
2197 * new allocated mbuf: nmb
2199 rxdp->read.hdr_addr = 0;
2200 rxdp->read.pkt_addr = dma_addr;
2202 /* calculate rx_packet_len of the received pkt */
2203 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2204 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2206 /* fill old mbuf with received descriptor: rxd */
2207 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2208 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2211 rxm->pkt_len = rx_packet_len;
2212 rxm->data_len = rx_packet_len;
2213 rxm->port = rxq->port_id;
2214 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2215 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2216 ice_rxd_to_vlan_tci(rxm, &rxd);
2217 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2218 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2219 rxm->ol_flags |= pkt_flags;
2220 /* copy old mbuf to rx_pkts */
2221 rx_pkts[nb_rx++] = rxm;
2223 rxq->rx_tail = rx_id;
2225 * If the number of free RX descriptors is greater than the RX free
2226 * threshold of the queue, advance the receive tail register of queue.
2227 * Update that register with the value of the last processed RX
2228 * descriptor minus 1.
2230 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2231 if (nb_hold > rxq->rx_free_thresh) {
2232 rx_id = (uint16_t)(rx_id == 0 ?
2233 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2234 /* write TAIL register */
2235 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2238 rxq->nb_rx_hold = nb_hold;
2240 /* return received packet in the burst */
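
/*
 * A minimal usage sketch, not part of the driver: consuming this receive
 * path through the generic ethdev API. port_id, queue_id and the burst
 * size of 32 are illustrative assumptions.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	for (uint16_t k = 0; k < nb; k++) {
 *		// packet_type, RSS hash/FDIR id and ol_flags were filled
 *		// in by ice_recv_pkts() above
 *		rte_pktmbuf_free(pkts[k]);
 *	}
 */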
2245 ice_parse_tunneling_params(uint64_t ol_flags,
2246 union ice_tx_offload tx_offload,
2247 uint32_t *cd_tunneling)
2249 /* EIPT: External (outer) IP header type */
2250 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2251 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2252 else if (ol_flags & PKT_TX_OUTER_IPV4)
2253 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2254 else if (ol_flags & PKT_TX_OUTER_IPV6)
2255 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2257 /* EIPLEN: External (outer) IP header length, in DWords */
2258 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2259 ICE_TXD_CTX_QW0_EIPLEN_S;
2261 /* L4TUNT: L4 Tunneling Type */
2262 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2263 case PKT_TX_TUNNEL_IPIP:
2264 /* for non UDP / GRE tunneling, set to 00b */
2266 case PKT_TX_TUNNEL_VXLAN:
2267 case PKT_TX_TUNNEL_GTP:
2268 case PKT_TX_TUNNEL_GENEVE:
2269 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2271 case PKT_TX_TUNNEL_GRE:
2272 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2275 PMD_TX_LOG(ERR, "Tunnel type not supported");
2279 /* L4TUNLEN: L4 Tunneling Length, in Words
2281 * We depend on the app to set rte_mbuf.l2_len correctly.
2282 * For IP in GRE it should be set to the length of the GRE header.
2284 * For MAC in GRE or MAC in UDP it should be set to the length
2285 * of the GRE or UDP headers plus the inner MAC up to and including
2286 * its last Ethertype.
2287 * If MPLS labels exist, they should be included as well.
2289 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2290 ICE_TXD_CTX_QW0_NATLEN_S;
2292 if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2293 (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2294 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2295 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
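
/*
 * A minimal sketch, not part of the driver, of the mbuf fields an
 * application must fill so that ice_parse_tunneling_params() above can
 * compute EIPT, EIPLEN and L4TUNLEN. Assumes VXLAN over IPv4 with no
 * MPLS labels; the header sizes are illustrative.
 *
 *	m->ol_flags |= PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_IPV4 |
 *		       PKT_TX_TUNNEL_VXLAN | PKT_TX_IP_CKSUM | PKT_TX_IPV4;
 *	m->outer_l2_len = 14;		// outer Ethernet
 *	m->outer_l3_len = 20;		// outer IPv4, no options
 *	m->l2_len = 8 + 8 + 14;		// UDP + VXLAN + inner Ethernet
 *	m->l3_len = 20;			// inner IPv4
 */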
2299 ice_txd_enable_checksum(uint64_t ol_flags,
2301 uint32_t *td_offset,
2302 union ice_tx_offload tx_offload)
2305 if (ol_flags & PKT_TX_TUNNEL_MASK)
2306 *td_offset |= (tx_offload.outer_l2_len >> 1)
2307 << ICE_TX_DESC_LEN_MACLEN_S;
2309 *td_offset |= (tx_offload.l2_len >> 1)
2310 << ICE_TX_DESC_LEN_MACLEN_S;
2312 /* Enable L3 checksum offloads */
2313 if (ol_flags & PKT_TX_IP_CKSUM) {
2314 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2315 *td_offset |= (tx_offload.l3_len >> 2) <<
2316 ICE_TX_DESC_LEN_IPLEN_S;
2317 } else if (ol_flags & PKT_TX_IPV4) {
2318 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2319 *td_offset |= (tx_offload.l3_len >> 2) <<
2320 ICE_TX_DESC_LEN_IPLEN_S;
2321 } else if (ol_flags & PKT_TX_IPV6) {
2322 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2323 *td_offset |= (tx_offload.l3_len >> 2) <<
2324 ICE_TX_DESC_LEN_IPLEN_S;
2327 if (ol_flags & PKT_TX_TCP_SEG) {
2328 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2329 *td_offset |= (tx_offload.l4_len >> 2) <<
2330 ICE_TX_DESC_LEN_L4_LEN_S;
2334 /* Enable L4 checksum offloads */
2335 switch (ol_flags & PKT_TX_L4_MASK) {
2336 case PKT_TX_TCP_CKSUM:
2337 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2338 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2339 ICE_TX_DESC_LEN_L4_LEN_S;
2341 case PKT_TX_SCTP_CKSUM:
2342 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2343 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2344 ICE_TX_DESC_LEN_L4_LEN_S;
2346 case PKT_TX_UDP_CKSUM:
2347 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2348 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2349 ICE_TX_DESC_LEN_L4_LEN_S;
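
/*
 * A worked example of the offset encoding above, assuming a plain
 * (non-tunneled, non-TSO) TCP/IPv4 packet without VLAN:
 *	MACLEN = l2_len >> 1 = 14 >> 1 = 7	(2-byte words)
 *	IPLEN  = l3_len >> 2 = 20 >> 2 = 5	(4-byte dwords)
 *	L4LEN  = sizeof(struct rte_tcp_hdr) >> 2 = 20 >> 2 = 5	(dwords)
 * i.e. the MAC length is counted in words, the IP and L4 lengths in
 * dwords.
 */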
2357 ice_xmit_cleanup(struct ice_tx_queue *txq)
2359 struct ice_tx_entry *sw_ring = txq->sw_ring;
2360 volatile struct ice_tx_desc *txd = txq->tx_ring;
2361 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2362 uint16_t nb_tx_desc = txq->nb_tx_desc;
2363 uint16_t desc_to_clean_to;
2364 uint16_t nb_tx_to_clean;
2366 /* Determine the last descriptor needing to be cleaned */
2367 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2368 if (desc_to_clean_to >= nb_tx_desc)
2369 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2371 /* Check to make sure the last descriptor to clean is done */
2372 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2373 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2374 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2375 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2376 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2378 txq->port_id, txq->queue_id,
2379 txd[desc_to_clean_to].cmd_type_offset_bsz);
2380 /* Failed to clean any descriptors */
2384 /* Figure out how many descriptors will be cleaned */
2385 if (last_desc_cleaned > desc_to_clean_to)
2386 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2389 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2392 /* The last descriptor to clean is done, so that means all the
2393 * descriptors from the last descriptor that was cleaned
2394 * up to the last descriptor with the RS bit set
2395 * are done. Only reset the threshold descriptor.
2397 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2399 /* Update the txq to reflect the last descriptor that was cleaned */
2400 txq->last_desc_cleaned = desc_to_clean_to;
2401 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
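
/*
 * A worked example of the wrap-around arithmetic above, ignoring the
 * last_id indirection and assuming nb_tx_desc = 512, tx_rs_thresh = 32
 * and last_desc_cleaned = 500:
 *	desc_to_clean_to = (500 + 32) - 512 = 20
 *	nb_tx_to_clean   = (512 - 500) + 20 = 32
 * i.e. exactly one RS-threshold worth of descriptors is reclaimed.
 */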
2406 /* Construct the tx flags */
2407 static inline uint64_t
2408 ice_build_ctob(uint32_t td_cmd,
2413 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2414 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2415 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2416 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2417 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2420 /* Check if the context descriptor is needed for TX offloading */
2421 static inline uint16_t
2422 ice_calc_context_desc(uint64_t flags)
2424 static uint64_t mask = PKT_TX_TCP_SEG |
2426 PKT_TX_OUTER_IP_CKSUM |
2429 return (flags & mask) ? 1 : 0;
2432 /* set ice TSO context descriptor */
2433 static inline uint64_t
2434 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2436 uint64_t ctx_desc = 0;
2437 uint32_t cd_cmd, hdr_len, cd_tso_len;
2439 if (!tx_offload.l4_len) {
2440 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2444 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2445 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2446 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2448 cd_cmd = ICE_TX_CTX_DESC_TSO;
2449 cd_tso_len = mbuf->pkt_len - hdr_len;
2450 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2451 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2452 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
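
/*
 * A worked TSO example for the context descriptor above, assuming a
 * non-tunneled TCP packet with pkt_len = 9014, l2/l3/l4 header lengths
 * of 14/20/20 and tso_segsz = 1460:
 *	hdr_len    = 14 + 20 + 20 = 54
 *	cd_tso_len = 9014 - 54 = 8960	(payload bytes only)
 * so the hardware emits ceil(8960 / 1460) = 7 segments.
 */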
2457 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2458 #define ICE_MAX_DATA_PER_TXD \
2459 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2460 /* Calculate the number of TX descriptors needed for each pkt */
2461 static inline uint16_t
2462 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2464 struct rte_mbuf *txd = tx_pkt;
2467 while (txd != NULL) {
2468 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
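
/*
 * A worked example of the count above, assuming a TSO mbuf chain of two
 * segments with data_len 20000 and 4000, and ICE_MAX_DATA_PER_TXD =
 * 16383 (16K - 1):
 *	DIV_ROUND_UP(20000, 16383) = 2, DIV_ROUND_UP(4000, 16383) = 1
 * so the chain needs three data descriptors.
 */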
2476 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2478 struct ice_tx_queue *txq;
2479 volatile struct ice_tx_desc *tx_ring;
2480 volatile struct ice_tx_desc *txd;
2481 struct ice_tx_entry *sw_ring;
2482 struct ice_tx_entry *txe, *txn;
2483 struct rte_mbuf *tx_pkt;
2484 struct rte_mbuf *m_seg;
2485 uint32_t cd_tunneling_params;
2490 uint32_t td_cmd = 0;
2491 uint32_t td_offset = 0;
2492 uint32_t td_tag = 0;
2495 uint64_t buf_dma_addr;
2497 union ice_tx_offload tx_offload = {0};
2500 sw_ring = txq->sw_ring;
2501 tx_ring = txq->tx_ring;
2502 tx_id = txq->tx_tail;
2503 txe = &sw_ring[tx_id];
2505 /* Check if the descriptor ring needs to be cleaned. */
2506 if (txq->nb_tx_free < txq->tx_free_thresh)
2507 (void)ice_xmit_cleanup(txq);
2509 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2510 tx_pkt = *tx_pkts++;
2515 ol_flags = tx_pkt->ol_flags;
2516 tx_offload.l2_len = tx_pkt->l2_len;
2517 tx_offload.l3_len = tx_pkt->l3_len;
2518 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2519 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2520 tx_offload.l4_len = tx_pkt->l4_len;
2521 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2522 /* Calculate the number of context descriptors needed. */
2523 nb_ctx = ice_calc_context_desc(ol_flags);
2525 /* The number of descriptors that must be allocated for
2526 * a packet equals the number of segments of that packet
2527 * plus one context descriptor, if needed.
2528 * When TSO is enabled, recalculate the needed Tx descriptors in case
2529 * an mbuf's data size exceeds the hardware's per-descriptor maximum.
2532 if (ol_flags & PKT_TX_TCP_SEG)
2533 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2536 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2537 tx_last = (uint16_t)(tx_id + nb_used - 1);
2540 if (tx_last >= txq->nb_tx_desc)
2541 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2543 if (nb_used > txq->nb_tx_free) {
2544 if (ice_xmit_cleanup(txq) != 0) {
2549 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2550 while (nb_used > txq->nb_tx_free) {
2551 if (ice_xmit_cleanup(txq) != 0) {
2560 /* Descriptor based VLAN insertion */
2561 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2562 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2563 td_tag = tx_pkt->vlan_tci;
2566 /* Fill in tunneling parameters if necessary */
2567 cd_tunneling_params = 0;
2568 if (ol_flags & PKT_TX_TUNNEL_MASK)
2569 ice_parse_tunneling_params(ol_flags, tx_offload,
2570 &cd_tunneling_params);
2572 /* Enable checksum offloading */
2573 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2574 ice_txd_enable_checksum(ol_flags, &td_cmd,
2575 &td_offset, tx_offload);
2578 /* Setup TX context descriptor if required */
2579 volatile struct ice_tx_ctx_desc *ctx_txd =
2580 (volatile struct ice_tx_ctx_desc *)
2582 uint16_t cd_l2tag2 = 0;
2583 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2585 txn = &sw_ring[txe->next_id];
2586 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2588 rte_pktmbuf_free_seg(txe->mbuf);
2592 if (ol_flags & PKT_TX_TCP_SEG)
2593 cd_type_cmd_tso_mss |=
2594 ice_set_tso_ctx(tx_pkt, tx_offload);
2596 ctx_txd->tunneling_params =
2597 rte_cpu_to_le_32(cd_tunneling_params);
2599 /* TX context descriptor based double VLAN insert */
2600 if (ol_flags & PKT_TX_QINQ) {
2601 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2602 cd_type_cmd_tso_mss |=
2603 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2604 ICE_TXD_CTX_QW1_CMD_S);
2606 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2608 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2610 txe->last_id = tx_last;
2611 tx_id = txe->next_id;
2617 txd = &tx_ring[tx_id];
2618 txn = &sw_ring[txe->next_id];
2621 rte_pktmbuf_free_seg(txe->mbuf);
2624 /* Setup TX Descriptor */
2625 slen = m_seg->data_len;
2626 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2628 while ((ol_flags & PKT_TX_TCP_SEG) &&
2629 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2630 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2631 txd->cmd_type_offset_bsz =
2632 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2633 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2634 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2635 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2636 ICE_TXD_QW1_TX_BUF_SZ_S) |
2637 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2639 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2640 slen -= ICE_MAX_DATA_PER_TXD;
2642 txe->last_id = tx_last;
2643 tx_id = txe->next_id;
2645 txd = &tx_ring[tx_id];
2646 txn = &sw_ring[txe->next_id];
2649 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2650 txd->cmd_type_offset_bsz =
2651 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2652 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2653 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2654 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2655 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2657 txe->last_id = tx_last;
2658 tx_id = txe->next_id;
2660 m_seg = m_seg->next;
2663 /* fill the last descriptor with End of Packet (EOP) bit */
2664 td_cmd |= ICE_TX_DESC_CMD_EOP;
2665 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2666 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2668 /* set RS bit on the last descriptor of one packet */
2669 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2670 PMD_TX_FREE_LOG(DEBUG,
2671 "Setting RS bit on TXD id="
2672 "%4u (port=%d queue=%d)",
2673 tx_last, txq->port_id, txq->queue_id);
2675 td_cmd |= ICE_TX_DESC_CMD_RS;
2677 /* Update txq RS bit counters */
2678 txq->nb_tx_used = 0;
2680 txd->cmd_type_offset_bsz |=
2681 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2685 /* update Tail register */
2686 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2687 txq->tx_tail = tx_id;
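
/*
 * A minimal usage sketch, not part of the driver: driving this transmit
 * path from an application. The retry loop is an illustrative pattern;
 * port_id, queue_id, pkts and n are assumptions of the example.
 *
 *	uint16_t sent = 0;
 *	while (sent < n)
 *		sent += rte_eth_tx_burst(port_id, queue_id,
 *					 pkts + sent, n - sent);
 */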
2692 static __rte_always_inline int
2693 ice_tx_free_bufs(struct ice_tx_queue *txq)
2695 struct ice_tx_entry *txep;
2698 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2699 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2700 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2703 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2705 for (i = 0; i < txq->tx_rs_thresh; i++)
2706 rte_prefetch0((txep + i)->mbuf);
2708 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2709 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2710 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2714 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2715 rte_pktmbuf_free_seg(txep->mbuf);
2720 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2721 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2722 if (txq->tx_next_dd >= txq->nb_tx_desc)
2723 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2725 return txq->tx_rs_thresh;
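
/*
 * With assumed values nb_tx_desc = 512 and tx_rs_thresh = 32,
 * tx_next_dd above starts at 31 and walks 31, 63, ..., 511 before
 * wrapping back to 31; each successful call frees exactly tx_rs_thresh
 * mbufs.
 */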
2729 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2732 struct ice_tx_entry *swr_ring = txq->sw_ring;
2733 uint16_t i, tx_last, tx_id;
2734 uint16_t nb_tx_free_last;
2735 uint16_t nb_tx_to_clean;
2738 /* Start freeing mbufs from the entry after tx_tail */
2739 tx_last = txq->tx_tail;
2740 tx_id = swr_ring[tx_last].next_id;
2742 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2745 nb_tx_to_clean = txq->nb_tx_free;
2746 nb_tx_free_last = txq->nb_tx_free;
2748 free_cnt = txq->nb_tx_desc;
2750 /* Loop through swr_ring to count the number of
2751 * freeable mbufs and packets.
2753 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2754 for (i = 0; i < nb_tx_to_clean &&
2755 pkt_cnt < free_cnt &&
2756 tx_id != tx_last; i++) {
2757 if (swr_ring[tx_id].mbuf != NULL) {
2758 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2759 swr_ring[tx_id].mbuf = NULL;
2762 * If this is the last segment in the packet,
2763 * increment the packet count
2765 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2768 tx_id = swr_ring[tx_id].next_id;
2771 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2772 txq->nb_tx_free || tx_id == tx_last)
2775 if (pkt_cnt < free_cnt) {
2776 if (ice_xmit_cleanup(txq))
2779 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2780 nb_tx_free_last = txq->nb_tx_free;
2784 return (int)pkt_cnt;
2789 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2790 uint32_t free_cnt __rte_unused)
2797 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2802 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2803 free_cnt = txq->nb_tx_desc;
2805 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2807 for (i = 0; i < cnt; i += n) {
2808 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2811 n = ice_tx_free_bufs(txq);
2821 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2823 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2824 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2825 struct ice_adapter *ad =
2826 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2829 if (ad->tx_vec_allowed)
2830 return ice_tx_done_cleanup_vec(q, free_cnt);
2832 if (ad->tx_simple_allowed)
2833 return ice_tx_done_cleanup_simple(q, free_cnt);
2835 return ice_tx_done_cleanup_full(q, free_cnt);
2838 /* Populate 4 descriptors with data from 4 mbufs */
2840 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2845 for (i = 0; i < 4; i++, txdp++, pkts++) {
2846 dma_addr = rte_mbuf_data_iova(*pkts);
2847 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2848 txdp->cmd_type_offset_bsz =
2849 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2850 (*pkts)->data_len, 0);
2854 /* Populate 1 descriptor with data from 1 mbuf */
2856 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2860 dma_addr = rte_mbuf_data_iova(*pkts);
2861 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2862 txdp->cmd_type_offset_bsz =
2863 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2864 (*pkts)->data_len, 0);
2868 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2871 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2872 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2873 const int N_PER_LOOP = 4;
2874 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2875 int mainpart, leftover;
2879 * Process most of the packets in chunks of N pkts. Any
2880 * leftover packets will get processed one at a time.
2882 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2883 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2884 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2885 /* Copy N mbuf pointers to the S/W ring */
2886 for (j = 0; j < N_PER_LOOP; ++j)
2887 (txep + i + j)->mbuf = *(pkts + i + j);
2888 tx4(txdp + i, pkts + i);
2891 if (unlikely(leftover > 0)) {
2892 for (i = 0; i < leftover; ++i) {
2893 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2894 tx1(txdp + mainpart + i, pkts + mainpart + i);
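
/*
 * A worked example of the split above, for an assumed burst of
 * nb_pkts = 10:
 *	mainpart = 10 & ~3 = 8	(two tx4() calls)
 *	leftover = 10 & 3  = 2	(two tx1() calls)
 */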
2899 static inline uint16_t
2900 tx_xmit_pkts(struct ice_tx_queue *txq,
2901 struct rte_mbuf **tx_pkts,
2904 volatile struct ice_tx_desc *txr = txq->tx_ring;
2908 * Begin scanning the H/W ring for done descriptors when the number
2909 * of available descriptors drops below tx_free_thresh. For each done
2910 * descriptor, free the associated buffer.
2912 if (txq->nb_tx_free < txq->tx_free_thresh)
2913 ice_tx_free_bufs(txq);
2915 /* Use only the available descriptors */
2916 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2917 if (unlikely(!nb_pkts))
2920 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2921 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2922 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2923 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2924 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2925 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2927 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2931 /* Fill hardware descriptor ring with mbuf data */
2932 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2933 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2935 /* Determine whether the RS bit needs to be set */
2936 if (txq->tx_tail > txq->tx_next_rs) {
2937 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2938 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2941 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2942 if (txq->tx_next_rs >= txq->nb_tx_desc)
2943 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2946 if (txq->tx_tail >= txq->nb_tx_desc)
2949 /* Update the tx tail register */
2950 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
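
/*
 * A worked example of the RS placement above, assuming nb_tx_desc = 512,
 * tx_rs_thresh = 32 and tx_next_rs = 31: a burst that advances tx_tail
 * from 20 to 40 makes tx_tail > tx_next_rs, so the RS bit is set on
 * descriptor 31 and tx_next_rs moves on to 63.
 */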
2956 ice_xmit_pkts_simple(void *tx_queue,
2957 struct rte_mbuf **tx_pkts,
2962 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2963 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2967 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2970 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2971 &tx_pkts[nb_tx], num);
2972 nb_tx = (uint16_t)(nb_tx + ret);
2973 nb_pkts = (uint16_t)(nb_pkts - ret);
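
/*
 * For bursts larger than ICE_TX_MAX_BURST the loop above submits chunks
 * of at most ICE_TX_MAX_BURST packets; assuming a value of 32, a burst
 * of 100 packets is submitted as 32 + 32 + 32 + 4, and the loop stops
 * early if a chunk is only partially accepted (ring full).
 */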
2982 ice_set_rx_function(struct rte_eth_dev *dev)
2984 PMD_INIT_FUNC_TRACE();
2985 struct ice_adapter *ad =
2986 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2988 struct ice_rx_queue *rxq;
2990 bool use_avx2 = false;
2992 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2993 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
2994 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2995 ad->rx_vec_allowed = true;
2996 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2997 rxq = dev->data->rx_queues[i];
2998 if (rxq && ice_rxq_vec_setup(rxq)) {
2999 ad->rx_vec_allowed = false;
3004 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3005 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3006 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3010 ad->rx_vec_allowed = false;
3014 if (ad->rx_vec_allowed) {
3015 if (dev->data->scattered_rx) {
3017 "Using %sVector Scattered Rx (port %d).",
3018 use_avx2 ? "avx2 " : "",
3019 dev->data->port_id);
3020 dev->rx_pkt_burst = use_avx2 ?
3021 ice_recv_scattered_pkts_vec_avx2 :
3022 ice_recv_scattered_pkts_vec;
3024 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
3025 use_avx2 ? "avx2 " : "",
3026 dev->data->port_id);
3027 dev->rx_pkt_burst = use_avx2 ?
3028 ice_recv_pkts_vec_avx2 :
3036 if (dev->data->scattered_rx) {
3037 /* Set the non-LRO scattered function */
3039 "Using a Scattered function on port %d.",
3040 dev->data->port_id);
3041 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3042 } else if (ad->rx_bulk_alloc_allowed) {
3044 "Rx Burst Bulk Alloc Preconditions are "
3045 "satisfied. Rx Burst Bulk Alloc function "
3046 "will be used on port %d.",
3047 dev->data->port_id);
3048 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3051 "Rx Burst Bulk Alloc Preconditions are not "
3052 "satisfied, Normal Rx will be used on port %d.",
3053 dev->data->port_id);
3054 dev->rx_pkt_burst = ice_recv_pkts;
3058 static const struct {
3059 eth_rx_burst_t pkt_burst;
3061 } ice_rx_burst_infos[] = {
3062 { ice_recv_scattered_pkts, "Scalar Scattered" },
3063 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3064 { ice_recv_pkts, "Scalar" },
3066 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3067 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3068 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3069 { ice_recv_pkts_vec, "Vector SSE" },
3074 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3075 struct rte_eth_burst_mode *mode)
3077 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3081 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3082 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3083 snprintf(mode->info, sizeof(mode->info), "%s",
3084 ice_rx_burst_infos[i].info);
3094 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3096 struct ice_adapter *ad =
3097 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3099 /* Use a simple Tx queue if possible (only fast free is allowed) */
3100 ad->tx_simple_allowed =
3102 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3103 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3105 if (ad->tx_simple_allowed)
3106 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3110 "Simple Tx can NOT be enabled on Tx queue %u.",
3114 /*********************************************************************
3118 **********************************************************************/
3119 /* The default values of TSO MSS */
3120 #define ICE_MIN_TSO_MSS 64
3121 #define ICE_MAX_TSO_MSS 9728
3122 #define ICE_MAX_TSO_FRAME_SIZE 262144
3124 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3131 for (i = 0; i < nb_pkts; i++) {
3133 ol_flags = m->ol_flags;
3135 if (ol_flags & PKT_TX_TCP_SEG &&
3136 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3137 m->tso_segsz > ICE_MAX_TSO_MSS ||
3138 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3140 * An MSS outside this range is considered malicious
3146 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3147 ret = rte_validate_tx_offload(m);
3153 ret = rte_net_intel_cksum_prepare(m);
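
/*
 * A minimal usage sketch, not part of the driver: invoking the prepare
 * hook above through the generic ethdev API. port_id and queue_id are
 * illustrative assumptions.
 *
 *	uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
 *	if (nb_ok < n) {
 *		// pkts[nb_ok] failed validation; rte_errno tells why
 *	}
 *	nb_ok = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
 */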
3163 ice_set_tx_function(struct rte_eth_dev *dev)
3165 struct ice_adapter *ad =
3166 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3168 struct ice_tx_queue *txq;
3170 bool use_avx2 = false;
3172 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3173 if (!ice_tx_vec_dev_check(dev) &&
3174 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3175 ad->tx_vec_allowed = true;
3176 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3177 txq = dev->data->tx_queues[i];
3178 if (txq && ice_txq_vec_setup(txq)) {
3179 ad->tx_vec_allowed = false;
3184 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3185 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3186 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3190 ad->tx_vec_allowed = false;
3194 if (ad->tx_vec_allowed) {
3195 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3196 use_avx2 ? "avx2 " : "",
3197 dev->data->port_id);
3198 dev->tx_pkt_burst = use_avx2 ?
3199 ice_xmit_pkts_vec_avx2 :
3201 dev->tx_pkt_prepare = NULL;
3207 if (ad->tx_simple_allowed) {
3208 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3209 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3210 dev->tx_pkt_prepare = NULL;
3212 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3213 dev->tx_pkt_burst = ice_xmit_pkts;
3214 dev->tx_pkt_prepare = ice_prep_pkts;
3218 static const struct {
3219 eth_tx_burst_t pkt_burst;
3221 } ice_tx_burst_infos[] = {
3222 { ice_xmit_pkts_simple, "Scalar Simple" },
3223 { ice_xmit_pkts, "Scalar" },
3225 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3226 { ice_xmit_pkts_vec, "Vector SSE" },
3231 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3232 struct rte_eth_burst_mode *mode)
3234 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3238 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3239 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3240 snprintf(mode->info, sizeof(mode->info), "%s",
3241 ice_tx_burst_infos[i].info);
3250 /* The hardware datasheet describes the meaning of each value in detail.
3252 * @note: fix ice_dev_supported_ptypes_get() if anything changes here.
3254 static inline uint32_t
3255 ice_get_default_pkt_type(uint16_t ptype)
3257 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3258 __rte_cache_aligned = {
3261 [1] = RTE_PTYPE_L2_ETHER,
3262 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3263 /* [3] - [5] reserved */
3264 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3265 /* [7] - [10] reserved */
3266 [11] = RTE_PTYPE_L2_ETHER_ARP,
3267 /* [12] - [21] reserved */
3269 /* Non tunneled IPv4 */
3270 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3272 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3273 RTE_PTYPE_L4_NONFRAG,
3274 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3277 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3279 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3281 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3285 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3286 RTE_PTYPE_TUNNEL_IP |
3287 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3288 RTE_PTYPE_INNER_L4_FRAG,
3289 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3290 RTE_PTYPE_TUNNEL_IP |
3291 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3292 RTE_PTYPE_INNER_L4_NONFRAG,
3293 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3294 RTE_PTYPE_TUNNEL_IP |
3295 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3296 RTE_PTYPE_INNER_L4_UDP,
3298 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3299 RTE_PTYPE_TUNNEL_IP |
3300 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3301 RTE_PTYPE_INNER_L4_TCP,
3302 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3303 RTE_PTYPE_TUNNEL_IP |
3304 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3305 RTE_PTYPE_INNER_L4_SCTP,
3306 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3307 RTE_PTYPE_TUNNEL_IP |
3308 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3309 RTE_PTYPE_INNER_L4_ICMP,
3312 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3313 RTE_PTYPE_TUNNEL_IP |
3314 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3315 RTE_PTYPE_INNER_L4_FRAG,
3316 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3317 RTE_PTYPE_TUNNEL_IP |
3318 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3319 RTE_PTYPE_INNER_L4_NONFRAG,
3320 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3321 RTE_PTYPE_TUNNEL_IP |
3322 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3323 RTE_PTYPE_INNER_L4_UDP,
3325 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3326 RTE_PTYPE_TUNNEL_IP |
3327 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3328 RTE_PTYPE_INNER_L4_TCP,
3329 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3330 RTE_PTYPE_TUNNEL_IP |
3331 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3332 RTE_PTYPE_INNER_L4_SCTP,
3333 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3334 RTE_PTYPE_TUNNEL_IP |
3335 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3336 RTE_PTYPE_INNER_L4_ICMP,
3338 /* IPv4 --> GRE/Teredo/VXLAN */
3339 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3340 RTE_PTYPE_TUNNEL_GRENAT,
3342 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3343 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3344 RTE_PTYPE_TUNNEL_GRENAT |
3345 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3346 RTE_PTYPE_INNER_L4_FRAG,
3347 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3348 RTE_PTYPE_TUNNEL_GRENAT |
3349 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3350 RTE_PTYPE_INNER_L4_NONFRAG,
3351 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3352 RTE_PTYPE_TUNNEL_GRENAT |
3353 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3354 RTE_PTYPE_INNER_L4_UDP,
3356 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3357 RTE_PTYPE_TUNNEL_GRENAT |
3358 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3359 RTE_PTYPE_INNER_L4_TCP,
3360 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3361 RTE_PTYPE_TUNNEL_GRENAT |
3362 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3363 RTE_PTYPE_INNER_L4_SCTP,
3364 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3365 RTE_PTYPE_TUNNEL_GRENAT |
3366 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3367 RTE_PTYPE_INNER_L4_ICMP,
3369 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3370 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3371 RTE_PTYPE_TUNNEL_GRENAT |
3372 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3373 RTE_PTYPE_INNER_L4_FRAG,
3374 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3375 RTE_PTYPE_TUNNEL_GRENAT |
3376 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3377 RTE_PTYPE_INNER_L4_NONFRAG,
3378 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3379 RTE_PTYPE_TUNNEL_GRENAT |
3380 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3381 RTE_PTYPE_INNER_L4_UDP,
3383 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3384 RTE_PTYPE_TUNNEL_GRENAT |
3385 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3386 RTE_PTYPE_INNER_L4_TCP,
3387 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3388 RTE_PTYPE_TUNNEL_GRENAT |
3389 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3390 RTE_PTYPE_INNER_L4_SCTP,
3391 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3392 RTE_PTYPE_TUNNEL_GRENAT |
3393 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3394 RTE_PTYPE_INNER_L4_ICMP,
3396 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3397 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3398 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3400 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3401 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3402 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3403 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3404 RTE_PTYPE_INNER_L4_FRAG,
3405 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3406 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3407 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3408 RTE_PTYPE_INNER_L4_NONFRAG,
3409 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3410 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3411 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3412 RTE_PTYPE_INNER_L4_UDP,
3414 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3415 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3416 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3417 RTE_PTYPE_INNER_L4_TCP,
3418 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3419 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3420 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3421 RTE_PTYPE_INNER_L4_SCTP,
3422 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3423 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3424 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3425 RTE_PTYPE_INNER_L4_ICMP,
3427 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3428 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3429 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3430 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3431 RTE_PTYPE_INNER_L4_FRAG,
3432 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3433 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3434 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3435 RTE_PTYPE_INNER_L4_NONFRAG,
3436 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3437 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3438 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3439 RTE_PTYPE_INNER_L4_UDP,
3441 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3442 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3443 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3444 RTE_PTYPE_INNER_L4_TCP,
3445 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3446 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3447 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3448 RTE_PTYPE_INNER_L4_SCTP,
3449 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3450 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3451 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3452 RTE_PTYPE_INNER_L4_ICMP,
3453 /* [73] - [87] reserved */
3455 /* Non tunneled IPv6 */
3456 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3458 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3459 RTE_PTYPE_L4_NONFRAG,
3460 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3463 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3465 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3467 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3471 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3472 RTE_PTYPE_TUNNEL_IP |
3473 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3474 RTE_PTYPE_INNER_L4_FRAG,
3475 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3476 RTE_PTYPE_TUNNEL_IP |
3477 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3478 RTE_PTYPE_INNER_L4_NONFRAG,
3479 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3480 RTE_PTYPE_TUNNEL_IP |
3481 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3482 RTE_PTYPE_INNER_L4_UDP,
3484 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3485 RTE_PTYPE_TUNNEL_IP |
3486 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3487 RTE_PTYPE_INNER_L4_TCP,
3488 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3489 RTE_PTYPE_TUNNEL_IP |
3490 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3491 RTE_PTYPE_INNER_L4_SCTP,
3492 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3493 RTE_PTYPE_TUNNEL_IP |
3494 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3495 RTE_PTYPE_INNER_L4_ICMP,
3498 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3499 RTE_PTYPE_TUNNEL_IP |
3500 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3501 RTE_PTYPE_INNER_L4_FRAG,
3502 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3503 RTE_PTYPE_TUNNEL_IP |
3504 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3505 RTE_PTYPE_INNER_L4_NONFRAG,
3506 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3507 RTE_PTYPE_TUNNEL_IP |
3508 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3509 RTE_PTYPE_INNER_L4_UDP,
3510 /* [105] reserved */
3511 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3512 RTE_PTYPE_TUNNEL_IP |
3513 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3514 RTE_PTYPE_INNER_L4_TCP,
3515 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3516 RTE_PTYPE_TUNNEL_IP |
3517 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3518 RTE_PTYPE_INNER_L4_SCTP,
3519 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3520 RTE_PTYPE_TUNNEL_IP |
3521 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3522 RTE_PTYPE_INNER_L4_ICMP,
3524 /* IPv6 --> GRE/Teredo/VXLAN */
3525 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3526 RTE_PTYPE_TUNNEL_GRENAT,
3528 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3529 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3530 RTE_PTYPE_TUNNEL_GRENAT |
3531 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3532 RTE_PTYPE_INNER_L4_FRAG,
3533 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3534 RTE_PTYPE_TUNNEL_GRENAT |
3535 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3536 RTE_PTYPE_INNER_L4_NONFRAG,
3537 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3538 RTE_PTYPE_TUNNEL_GRENAT |
3539 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3540 RTE_PTYPE_INNER_L4_UDP,
3541 /* [113] reserved */
3542 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3543 RTE_PTYPE_TUNNEL_GRENAT |
3544 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3545 RTE_PTYPE_INNER_L4_TCP,
3546 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3547 RTE_PTYPE_TUNNEL_GRENAT |
3548 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3549 RTE_PTYPE_INNER_L4_SCTP,
3550 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3551 RTE_PTYPE_TUNNEL_GRENAT |
3552 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3553 RTE_PTYPE_INNER_L4_ICMP,
3555 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3556 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3557 RTE_PTYPE_TUNNEL_GRENAT |
3558 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3559 RTE_PTYPE_INNER_L4_FRAG,
3560 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3561 RTE_PTYPE_TUNNEL_GRENAT |
3562 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3563 RTE_PTYPE_INNER_L4_NONFRAG,
3564 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3565 RTE_PTYPE_TUNNEL_GRENAT |
3566 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3567 RTE_PTYPE_INNER_L4_UDP,
3568 /* [120] reserved */
3569 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3570 RTE_PTYPE_TUNNEL_GRENAT |
3571 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3572 RTE_PTYPE_INNER_L4_TCP,
3573 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3574 RTE_PTYPE_TUNNEL_GRENAT |
3575 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3576 RTE_PTYPE_INNER_L4_SCTP,
3577 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3578 RTE_PTYPE_TUNNEL_GRENAT |
3579 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3580 RTE_PTYPE_INNER_L4_ICMP,
3582 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3583 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3584 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3586 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3587 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3588 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3589 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3590 RTE_PTYPE_INNER_L4_FRAG,
3591 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3592 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3593 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3594 RTE_PTYPE_INNER_L4_NONFRAG,
3595 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3596 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3597 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3598 RTE_PTYPE_INNER_L4_UDP,
3599 /* [128] reserved */
3600 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3601 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3602 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3603 RTE_PTYPE_INNER_L4_TCP,
3604 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3605 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3606 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3607 RTE_PTYPE_INNER_L4_SCTP,
3608 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3609 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3610 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3611 RTE_PTYPE_INNER_L4_ICMP,
3613 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3614 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3615 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3616 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3617 RTE_PTYPE_INNER_L4_FRAG,
3618 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3619 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3620 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3621 RTE_PTYPE_INNER_L4_NONFRAG,
3622 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3623 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3624 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3625 RTE_PTYPE_INNER_L4_UDP,
3626 /* [135] reserved */
3627 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3628 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3629 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3630 RTE_PTYPE_INNER_L4_TCP,
3631 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3632 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3633 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3634 RTE_PTYPE_INNER_L4_SCTP,
3635 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3636 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3637 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3638 RTE_PTYPE_INNER_L4_ICMP,
3639 /* [139] - [299] reserved */
3642 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3643 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3645 /* PPPoE --> IPv4 */
3646 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3647 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3649 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3650 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3651 RTE_PTYPE_L4_NONFRAG,
3652 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3653 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3655 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3656 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3658 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3659 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3661 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3662 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3665 /* PPPoE --> IPv6 */
3666 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3667 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3669 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3670 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3671 RTE_PTYPE_L4_NONFRAG,
3672 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3673 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3675 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3676 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3678 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3679 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3681 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3682 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3684 /* [314] - [324] reserved */
3686 /* IPv4/IPv6 --> GTPC/GTPU */
3687 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3688 RTE_PTYPE_TUNNEL_GTPC,
3689 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3690 RTE_PTYPE_TUNNEL_GTPC,
3691 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3692 RTE_PTYPE_TUNNEL_GTPC,
3693 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3694 RTE_PTYPE_TUNNEL_GTPC,
3695 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3696 RTE_PTYPE_TUNNEL_GTPU,
3697 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_TUNNEL_GTPU,
3700 /* IPv4 --> GTPU --> IPv4 */
3701 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3702 RTE_PTYPE_TUNNEL_GTPU |
3703 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3704 RTE_PTYPE_INNER_L4_FRAG,
3705 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3706 RTE_PTYPE_TUNNEL_GTPU |
3707 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3708 RTE_PTYPE_INNER_L4_NONFRAG,
3709 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3710 RTE_PTYPE_TUNNEL_GTPU |
3711 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3712 RTE_PTYPE_INNER_L4_UDP,
3713 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3714 RTE_PTYPE_TUNNEL_GTPU |
3715 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3716 RTE_PTYPE_INNER_L4_TCP,
3717 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3718 RTE_PTYPE_TUNNEL_GTPU |
3719 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3720 RTE_PTYPE_INNER_L4_ICMP,
3722 /* IPv6 --> GTPU --> IPv4 */
3723 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3724 RTE_PTYPE_TUNNEL_GTPU |
3725 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3726 RTE_PTYPE_INNER_L4_FRAG,
3727 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3728 RTE_PTYPE_TUNNEL_GTPU |
3729 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3730 RTE_PTYPE_INNER_L4_NONFRAG,
3731 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3732 RTE_PTYPE_TUNNEL_GTPU |
3733 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3734 RTE_PTYPE_INNER_L4_UDP,
3735 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3736 RTE_PTYPE_TUNNEL_GTPU |
3737 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3738 RTE_PTYPE_INNER_L4_TCP,
3739 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3740 RTE_PTYPE_TUNNEL_GTPU |
3741 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3742 RTE_PTYPE_INNER_L4_ICMP,
3744 /* IPv4 --> GTPU --> IPv6 */
3745 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3746 RTE_PTYPE_TUNNEL_GTPU |
3747 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3748 RTE_PTYPE_INNER_L4_FRAG,
3749 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3750 RTE_PTYPE_TUNNEL_GTPU |
3751 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3752 RTE_PTYPE_INNER_L4_NONFRAG,
3753 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3754 RTE_PTYPE_TUNNEL_GTPU |
3755 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3756 RTE_PTYPE_INNER_L4_UDP,
3757 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3758 RTE_PTYPE_TUNNEL_GTPU |
3759 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3760 RTE_PTYPE_INNER_L4_TCP,
3761 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3762 RTE_PTYPE_TUNNEL_GTPU |
3763 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3764 RTE_PTYPE_INNER_L4_ICMP,
3766 /* IPv6 --> GTPU --> IPv6 */
3767 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3768 RTE_PTYPE_TUNNEL_GTPU |
3769 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3770 RTE_PTYPE_INNER_L4_FRAG,
3771 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3772 RTE_PTYPE_TUNNEL_GTPU |
3773 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3774 RTE_PTYPE_INNER_L4_NONFRAG,
3775 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3776 RTE_PTYPE_TUNNEL_GTPU |
3777 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3778 RTE_PTYPE_INNER_L4_UDP,
3779 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3780 RTE_PTYPE_TUNNEL_GTPU |
3781 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3782 RTE_PTYPE_INNER_L4_TCP,
3783 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3784 RTE_PTYPE_TUNNEL_GTPU |
3785 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3786 RTE_PTYPE_INNER_L4_ICMP,
3787 /* All others reserved */
3790 return type_table[ptype];
3794 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3796 struct ice_adapter *ad =
3797 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3800 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3801 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
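
/*
 * With the table above, a hardware ptype of 23 resolves to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_L4_NONFRAG, which the receive paths store into
 * mbuf->packet_type through adapter->ptype_tbl.
 */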
3804 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
3805 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
3806 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3807 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3808 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3810 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
3811 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
3812 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3813 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
3814 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
3815 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
3818 * Check the programming status descriptor in the Rx queue.
3819 * This is done after a Flow Director filter has been programmed on
3823 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3825 volatile union ice_32byte_rx_desc *rxdp;
3832 rxdp = (volatile union ice_32byte_rx_desc *)
3833 (&rxq->rx_ring[rxq->rx_tail]);
3834 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
3835 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
3836 >> ICE_RXD_QW1_STATUS_S;
3838 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
3840 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
3841 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
3842 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
3843 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
3845 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
3846 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
3847 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
3848 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
3852 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
3853 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
3855 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
3859 rxdp->wb.qword1.status_error_len = 0;
3861 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
3863 if (rxq->rx_tail == 0)
3864 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3866 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
3872 #define ICE_FDIR_MAX_WAIT_US 10000
3875 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3877 struct ice_tx_queue *txq = pf->fdir.txq;
3878 struct ice_rx_queue *rxq = pf->fdir.rxq;
3879 volatile struct ice_fltr_desc *fdirdp;
3880 volatile struct ice_tx_desc *txdp;
3884 fdirdp = (volatile struct ice_fltr_desc *)
3885 (&txq->tx_ring[txq->tx_tail]);
3886 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3887 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3889 txdp = &txq->tx_ring[txq->tx_tail + 1];
3890 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3891 td_cmd = ICE_TX_DESC_CMD_EOP |
3892 ICE_TX_DESC_CMD_RS |
3893 ICE_TX_DESC_CMD_DUMMY;
3895 txdp->cmd_type_offset_bsz =
3896 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
3899 if (txq->tx_tail >= txq->nb_tx_desc)
3901 /* Update the tx tail register */
3902 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
3903 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3904 if ((txdp->cmd_type_offset_bsz &
3905 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3906 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3910 if (i >= ICE_FDIR_MAX_WAIT_US) {
3912 "Failed to program FDIR filter: time out to get DD on tx queue.");
3916 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
3919 ret = ice_check_fdir_programming_status(rxq);
3927 "Failed to program FDIR filter: programming status reported.");