1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
9 #include "rte_pmd_ice.h"
12 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
13 PKT_TX_IP_CKSUM | \
14 PKT_TX_L4_MASK | \
15 PKT_TX_TCP_SEG | \
16 PKT_TX_OUTER_IP_CKSUM)
18 /* Offset of mbuf dynamic field for protocol extraction data */
19 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
21 /* Mask of mbuf dynamic flags for protocol extraction type */
22 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
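/*
 * Illustrative sketch (not driver code): an application that enabled
 * protocol extraction can test the dynamic flag masks above and read the
 * extracted data through the dynamic field, using only helpers this file
 * already relies on from rte_pmd_ice.h, e.g.:
 *
 *   if (rte_net_ice_dynf_proto_xtr_metadata_avail() &&
 *       (mb->ol_flags & rte_net_ice_dynflag_proto_xtr_vlan_mask)) {
 *       uint32_t md = *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb);
 *       (md now holds the extracted VLAN metadata for this packet)
 *   }
 *
 * The field offset and flag masks are registered elsewhere; they keep
 * their initial values when protocol extraction is not requested.
 */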
30 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
32 static uint8_t rxdid_map[] = {
33 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
34 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
35 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
36 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
37 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
38 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
39 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
42 return xtr_type < RTE_DIM(rxdid_map) ?
43 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
47 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
49 volatile union ice_rx_flex_desc *rxdp)
51 volatile struct ice_32b_rx_flex_desc_comms *desc =
52 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
53 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
55 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
56 mb->ol_flags |= PKT_RX_RSS_HASH;
57 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
60 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
61 if (desc->flow_id != 0xFFFFFFFF) {
62 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
63 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
69 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
71 volatile union ice_rx_flex_desc *rxdp)
73 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
74 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
75 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
79 if (desc->flow_id != 0xFFFFFFFF) {
80 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
81 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
84 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
85 stat_err = rte_le_to_cpu_16(desc->status_error0);
86 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
87 mb->ol_flags |= PKT_RX_RSS_HASH;
88 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
94 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
96 volatile union ice_rx_flex_desc *rxdp)
98 volatile struct ice_32b_rx_flex_desc_comms *desc =
99 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
102 stat_err = rte_le_to_cpu_16(desc->status_error0);
103 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
104 mb->ol_flags |= PKT_RX_RSS_HASH;
105 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
108 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
109 if (desc->flow_id != 0xFFFFFFFF) {
110 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
111 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
114 if (rxq->xtr_ol_flag) {
115 uint32_t metadata = 0;
117 stat_err = rte_le_to_cpu_16(desc->status_error1);
119 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
120 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
122 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
124 metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
127 mb->ol_flags |= rxq->xtr_ol_flag;
129 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
136 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
138 volatile union ice_rx_flex_desc *rxdp)
140 volatile struct ice_32b_rx_flex_desc_comms *desc =
141 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
144 stat_err = rte_le_to_cpu_16(desc->status_error0);
145 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
146 mb->ol_flags |= PKT_RX_RSS_HASH;
147 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
150 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
151 if (desc->flow_id != 0xFFFFFFFF) {
152 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
153 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
156 if (rxq->xtr_ol_flag) {
157 uint32_t metadata = 0;
159 if (desc->flex_ts.flex.aux0 != 0xFFFF)
160 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
161 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
162 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
165 mb->ol_flags |= rxq->xtr_ol_flag;
167 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
174 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
177 case ICE_RXDID_COMMS_AUX_VLAN:
178 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
179 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
182 case ICE_RXDID_COMMS_AUX_IPV4:
183 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
184 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
187 case ICE_RXDID_COMMS_AUX_IPV6:
188 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
189 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
192 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
193 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
194 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
197 case ICE_RXDID_COMMS_AUX_TCP:
198 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
199 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
202 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
203 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
204 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
207 case ICE_RXDID_COMMS_GENERIC:
208 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
211 case ICE_RXDID_COMMS_OVS:
212 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
216 /* update this according to the RXDID for PROTO_XTR_NONE */
217 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
221 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
222 rxq->xtr_ol_flag = 0;
225 static enum ice_status
226 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
228 struct ice_vsi *vsi = rxq->vsi;
229 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
230 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
231 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
232 struct ice_rlan_ctx rx_ctx;
234 uint16_t buf_size, len;
235 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
236 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
239 /* Set buffer size, as header split is disabled. */
240 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
241 RTE_PKTMBUF_HEADROOM);
243 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
244 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
245 rxq->max_pkt_len = RTE_MIN(len,
246 dev->data->dev_conf.rxmode.max_rx_pkt_len);
248 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
249 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
250 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
251 PMD_DRV_LOG(ERR, "maximum packet length must "
252 "be larger than %u and smaller than %u,"
253 "as jumbo frame is enabled",
254 (uint32_t)RTE_ETHER_MAX_LEN,
255 (uint32_t)ICE_FRAME_SIZE_MAX);
259 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
260 rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
261 PMD_DRV_LOG(ERR, "maximum packet length must be "
262 "larger than %u and smaller than %u, "
263 "as jumbo frame is disabled",
264 (uint32_t)RTE_ETHER_MIN_LEN,
265 (uint32_t)RTE_ETHER_MAX_LEN);
270 memset(&rx_ctx, 0, sizeof(rx_ctx));
272 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
273 rx_ctx.qlen = rxq->nb_rx_desc;
274 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
275 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
276 rx_ctx.dtype = 0; /* No Header Split mode */
277 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
278 rx_ctx.dsize = 1; /* 32B descriptors */
280 rx_ctx.rxmax = rxq->max_pkt_len;
281 /* TPH: Transaction Layer Packet (TLP) processing hints */
282 rx_ctx.tphrdesc_ena = 1;
283 rx_ctx.tphwdesc_ena = 1;
284 rx_ctx.tphdata_ena = 1;
285 rx_ctx.tphhead_ena = 1;
286 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
287 * When the number of free descriptors goes below the lrxqthresh,
288 * an immediate interrupt is triggered.
290 rx_ctx.lrxqthresh = 2;
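/* Illustrative note: given the 64-descriptor granularity described above,
 * lrxqthresh = 2 corresponds to a threshold of 2 * 64 = 128 free
 * descriptors before the immediate interrupt is raised.
 */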
291 /* default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
294 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
296 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
298 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
299 rxq->port_id, rxq->queue_id, rxdid);
301 if (!(pf->supported_rxdid & BIT(rxdid))) {
302 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
307 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
309 /* Enable Flexible Descriptors in the queue context which
310 * allows this driver to select a specific receive descriptor format
312 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
313 QRXFLXP_CNTXT_RXDID_IDX_M;
315 /* increasing context priority to pick up profile ID;
316 * default is 0x01; setting to 0x03 to ensure profile
317 * is programmed if the previous context has the same priority
319 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
320 QRXFLXP_CNTXT_RXDID_PRIO_M;
322 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
324 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
326 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
330 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
332 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
337 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
338 RTE_PKTMBUF_HEADROOM);
340 /* Check if scattered RX needs to be used. */
341 if (rxq->max_pkt_len > buf_size)
342 dev->data->scattered_rx = 1;
344 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
346 /* Init the Rx tail register*/
347 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
352 /* Allocate mbufs for all descriptors in rx queue */
354 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
356 struct ice_rx_entry *rxe = rxq->sw_ring;
360 for (i = 0; i < rxq->nb_rx_desc; i++) {
361 volatile union ice_rx_flex_desc *rxd;
362 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
364 if (unlikely(!mbuf)) {
365 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
369 rte_mbuf_refcnt_set(mbuf, 1);
371 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
373 mbuf->port = rxq->port_id;
376 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
378 rxd = &rxq->rx_ring[i];
379 rxd->read.pkt_addr = dma_addr;
380 rxd->read.hdr_addr = 0;
381 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
391 /* Free all mbufs for descriptors in rx queue */
393 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
397 if (!rxq || !rxq->sw_ring) {
398 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
402 for (i = 0; i < rxq->nb_rx_desc; i++) {
403 if (rxq->sw_ring[i].mbuf) {
404 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
405 rxq->sw_ring[i].mbuf = NULL;
408 if (rxq->rx_nb_avail == 0)
410 for (i = 0; i < rxq->rx_nb_avail; i++)
411 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
413 rxq->rx_nb_avail = 0;
416 /* turn on or off rx queue
417 * @q_idx: queue index in pf scope
418 * @on: turn on or off the queue
421 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
426 /* QRX_CTRL = QRX_ENA */
427 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
430 if (reg & QRX_CTRL_QENA_STAT_M)
431 return 0; /* Already on, skip */
432 reg |= QRX_CTRL_QENA_REQ_M;
434 if (!(reg & QRX_CTRL_QENA_STAT_M))
435 return 0; /* Already off, skip */
436 reg &= ~QRX_CTRL_QENA_REQ_M;
439 /* Write the register */
440 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
441 /* Check the result. It is said that QENA_STAT
442 * follows QENA_REQ by no more than 10 us.
443 * TODO: need to change the wait counter later
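 * Illustrative budget (constant values assumed, not shown in this file):
 * with ICE_CHK_Q_ENA_COUNT polls spaced ICE_CHK_Q_ENA_INTERVAL_US apart,
 * the loop below waits at most about
 * ICE_CHK_Q_ENA_COUNT * ICE_CHK_Q_ENA_INTERVAL_US microseconds before
 * the timeout check following it reports a failure.
 */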
445 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
446 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
447 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
449 if ((reg & QRX_CTRL_QENA_REQ_M) &&
450 (reg & QRX_CTRL_QENA_STAT_M))
453 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
454 !(reg & QRX_CTRL_QENA_STAT_M))
459 /* Check if it is timeout */
460 if (j >= ICE_CHK_Q_ENA_COUNT) {
461 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
462 (on ? "enable" : "disable"), q_idx);
470 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
474 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
475 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
476 "rxq->rx_free_thresh=%d, "
477 "ICE_RX_MAX_BURST=%d",
478 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
480 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
481 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
482 "rxq->rx_free_thresh=%d, "
483 "rxq->nb_rx_desc=%d",
484 rxq->rx_free_thresh, rxq->nb_rx_desc);
486 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
487 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
488 "rxq->nb_rx_desc=%d, "
489 "rxq->rx_free_thresh=%d",
490 rxq->nb_rx_desc, rxq->rx_free_thresh);
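/*
 * Illustrative configuration that satisfies all of the checks above
 * (the ICE_RX_MAX_BURST value is an assumption, commonly 32): with
 * nb_rx_desc = 1024 and rx_free_thresh = 64, we have
 * 64 >= ICE_RX_MAX_BURST, 64 < 1024 and 1024 % 64 == 0, so the
 * bulk-allocation receive path can be used for such a queue.
 */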
497 /* reset fields in ice_rx_queue back to default */
499 ice_reset_rx_queue(struct ice_rx_queue *rxq)
505 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
509 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
511 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
512 ((volatile char *)rxq->rx_ring)[i] = 0;
514 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
515 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
516 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
518 rxq->rx_nb_avail = 0;
519 rxq->rx_next_avail = 0;
520 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
524 rxq->pkt_first_seg = NULL;
525 rxq->pkt_last_seg = NULL;
527 rxq->rxrearm_start = 0;
532 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
534 struct ice_rx_queue *rxq;
536 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
538 PMD_INIT_FUNC_TRACE();
540 if (rx_queue_id >= dev->data->nb_rx_queues) {
541 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
542 rx_queue_id, dev->data->nb_rx_queues);
546 rxq = dev->data->rx_queues[rx_queue_id];
547 if (!rxq || !rxq->q_set) {
548 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
553 err = ice_program_hw_rx_queue(rxq);
555 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
560 err = ice_alloc_rx_queue_mbufs(rxq);
562 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
566 /* Init the RX tail register. */
567 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
569 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
571 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
574 rxq->rx_rel_mbufs(rxq);
575 ice_reset_rx_queue(rxq);
579 dev->data->rx_queue_state[rx_queue_id] =
580 RTE_ETH_QUEUE_STATE_STARTED;
586 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
588 struct ice_rx_queue *rxq;
590 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
592 if (rx_queue_id < dev->data->nb_rx_queues) {
593 rxq = dev->data->rx_queues[rx_queue_id];
595 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
597 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
601 rxq->rx_rel_mbufs(rxq);
602 ice_reset_rx_queue(rxq);
603 dev->data->rx_queue_state[rx_queue_id] =
604 RTE_ETH_QUEUE_STATE_STOPPED;
611 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
613 struct ice_tx_queue *txq;
617 struct ice_aqc_add_tx_qgrp *txq_elem;
618 struct ice_tlan_ctx tx_ctx;
621 PMD_INIT_FUNC_TRACE();
623 if (tx_queue_id >= dev->data->nb_tx_queues) {
624 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
625 tx_queue_id, dev->data->nb_tx_queues);
629 txq = dev->data->tx_queues[tx_queue_id];
630 if (!txq || !txq->q_set) {
631 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
636 buf_len = ice_struct_size(txq_elem, txqs, 1);
637 txq_elem = ice_malloc(hw, buf_len);
642 hw = ICE_VSI_TO_HW(vsi);
644 memset(&tx_ctx, 0, sizeof(tx_ctx));
645 txq_elem->num_txqs = 1;
646 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
648 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
649 tx_ctx.qlen = txq->nb_tx_desc;
650 tx_ctx.pf_num = hw->pf_id;
651 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
652 tx_ctx.src_vsi = vsi->vsi_id;
653 tx_ctx.port_num = hw->port_info->lport;
654 tx_ctx.tso_ena = 1; /* tso enable */
655 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
656 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
658 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
661 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
663 /* Init the Tx tail register*/
664 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
666 /* Fix me, we assume TC always 0 here */
667 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
668 txq_elem, buf_len, NULL);
670 PMD_DRV_LOG(ERR, "Failed to add lan txq");
674 /* store the schedule node id */
675 txq->q_teid = txq_elem->txqs[0].q_teid;
677 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
683 static enum ice_status
684 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
686 struct ice_vsi *vsi = rxq->vsi;
687 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
688 uint32_t rxdid = ICE_RXDID_LEGACY_1;
689 struct ice_rlan_ctx rx_ctx;
694 rxq->rx_buf_len = 1024;
696 memset(&rx_ctx, 0, sizeof(rx_ctx));
698 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
699 rx_ctx.qlen = rxq->nb_rx_desc;
700 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
701 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
702 rx_ctx.dtype = 0; /* No Header Split mode */
703 rx_ctx.dsize = 1; /* 32B descriptors */
704 rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
705 /* TPH: Transaction Layer Packet (TLP) processing hints */
706 rx_ctx.tphrdesc_ena = 1;
707 rx_ctx.tphwdesc_ena = 1;
708 rx_ctx.tphdata_ena = 1;
709 rx_ctx.tphhead_ena = 1;
710 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
711 * When the number of free descriptors goes below the lrxqthresh,
712 * an immediate interrupt is triggered.
714 rx_ctx.lrxqthresh = 2;
715 /* default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
718 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
720 /* Enable Flexible Descriptors in the queue context which
721 * allows this driver to select a specific receive descriptor format
723 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
724 QRXFLXP_CNTXT_RXDID_IDX_M;
726 /* increasing context priority to pick up profile ID;
727 * default is 0x01; setting to 0x03 to ensure profile
728 * is programmed if the previous context has the same priority
730 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
731 QRXFLXP_CNTXT_RXDID_PRIO_M;
733 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
735 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
737 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
741 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
743 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
748 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
750 /* Init the Rx tail register*/
751 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
757 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
759 struct ice_rx_queue *rxq;
761 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
762 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
764 PMD_INIT_FUNC_TRACE();
767 if (!rxq || !rxq->q_set) {
768 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
773 err = ice_fdir_program_hw_rx_queue(rxq);
775 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
780 /* Init the RX tail register. */
781 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
783 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
785 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
788 ice_reset_rx_queue(rxq);
796 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
798 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
799 struct ice_tx_queue *txq;
803 struct ice_aqc_add_tx_qgrp *txq_elem;
804 struct ice_tlan_ctx tx_ctx;
807 PMD_INIT_FUNC_TRACE();
810 if (!txq || !txq->q_set) {
811 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
816 buf_len = ice_struct_size(txq_elem, txqs, 1);
817 txq_elem = ice_malloc(hw, buf_len);
822 hw = ICE_VSI_TO_HW(vsi);
824 memset(&tx_ctx, 0, sizeof(tx_ctx));
825 txq_elem->num_txqs = 1;
826 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
828 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
829 tx_ctx.qlen = txq->nb_tx_desc;
830 tx_ctx.pf_num = hw->pf_id;
831 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
832 tx_ctx.src_vsi = vsi->vsi_id;
833 tx_ctx.port_num = hw->port_info->lport;
834 tx_ctx.tso_ena = 1; /* tso enable */
835 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
836 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
838 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
841 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
843 /* Init the Tx tail register*/
844 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
846 /* Fix me, we assume TC always 0 here */
847 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
848 txq_elem, buf_len, NULL);
850 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
854 /* store the schedule node id */
855 txq->q_teid = txq_elem->txqs[0].q_teid;
861 /* Free all mbufs for descriptors in tx queue */
863 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
867 if (!txq || !txq->sw_ring) {
868 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
872 for (i = 0; i < txq->nb_tx_desc; i++) {
873 if (txq->sw_ring[i].mbuf) {
874 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
875 txq->sw_ring[i].mbuf = NULL;
881 ice_reset_tx_queue(struct ice_tx_queue *txq)
883 struct ice_tx_entry *txe;
884 uint16_t i, prev, size;
887 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
892 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
893 for (i = 0; i < size; i++)
894 ((volatile char *)txq->tx_ring)[i] = 0;
896 prev = (uint16_t)(txq->nb_tx_desc - 1);
897 for (i = 0; i < txq->nb_tx_desc; i++) {
898 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
900 txd->cmd_type_offset_bsz =
901 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
904 txe[prev].next_id = i;
908 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
909 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
914 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
915 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
919 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
921 struct ice_tx_queue *txq;
922 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
923 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
924 struct ice_vsi *vsi = pf->main_vsi;
925 enum ice_status status;
928 uint16_t q_handle = tx_queue_id;
930 if (tx_queue_id >= dev->data->nb_tx_queues) {
931 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
932 tx_queue_id, dev->data->nb_tx_queues);
936 txq = dev->data->tx_queues[tx_queue_id];
938 PMD_DRV_LOG(ERR, "TX queue %u is not available",
943 q_ids[0] = txq->reg_idx;
944 q_teids[0] = txq->q_teid;
946 /* Fix me, we assume TC always 0 here */
947 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
948 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
949 if (status != ICE_SUCCESS) {
950 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
954 txq->tx_rel_mbufs(txq);
955 ice_reset_tx_queue(txq);
956 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
962 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
964 struct ice_rx_queue *rxq;
966 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
967 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
971 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
973 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
977 rxq->rx_rel_mbufs(rxq);
983 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
985 struct ice_tx_queue *txq;
986 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
987 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
988 struct ice_vsi *vsi = pf->main_vsi;
989 enum ice_status status;
992 uint16_t q_handle = tx_queue_id;
996 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1002 q_ids[0] = txq->reg_idx;
1003 q_teids[0] = txq->q_teid;
1005 /* Fix me, we assume TC always 0 here */
1006 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1007 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1008 if (status != ICE_SUCCESS) {
1009 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1013 txq->tx_rel_mbufs(txq);
1019 ice_rx_queue_setup(struct rte_eth_dev *dev,
1022 unsigned int socket_id,
1023 const struct rte_eth_rxconf *rx_conf,
1024 struct rte_mempool *mp)
1026 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1027 struct ice_adapter *ad =
1028 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1029 struct ice_vsi *vsi = pf->main_vsi;
1030 struct ice_rx_queue *rxq;
1031 const struct rte_memzone *rz;
1034 int use_def_burst_func = 1;
1036 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1037 nb_desc > ICE_MAX_RING_DESC ||
1038 nb_desc < ICE_MIN_RING_DESC) {
1039 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1040 "invalid", nb_desc);
1044 /* Free memory if needed */
1045 if (dev->data->rx_queues[queue_idx]) {
1046 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1047 dev->data->rx_queues[queue_idx] = NULL;
1050 /* Allocate the rx queue data structure */
1051 rxq = rte_zmalloc_socket(NULL,
1052 sizeof(struct ice_rx_queue),
1053 RTE_CACHE_LINE_SIZE,
1056 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1057 "rx queue data structure");
1061 rxq->nb_rx_desc = nb_desc;
1062 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1063 rxq->queue_id = queue_idx;
1065 rxq->reg_idx = vsi->base_queue + queue_idx;
1066 rxq->port_id = dev->data->port_id;
1067 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1068 rxq->crc_len = RTE_ETHER_CRC_LEN;
1072 rxq->drop_en = rx_conf->rx_drop_en;
1074 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1075 rxq->proto_xtr = pf->proto_xtr != NULL ?
1076 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1078 /* Allocate the maximum number of RX ring hardware descriptors. */
1079 len = ICE_MAX_RING_DESC;
1082 * Allocating a little more memory because vectorized/bulk_alloc Rx
1083 * functions don't check boundaries each time.
1085 len += ICE_RX_MAX_BURST;
1087 /* Allocate the maximum number of RX ring hardware descriptor. */
1088 ring_size = sizeof(union ice_rx_flex_desc) * len;
1089 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1090 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1091 ring_size, ICE_RING_BASE_ALIGN,
1094 ice_rx_queue_release(rxq);
1095 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1099 /* Zero all the descriptors in the ring. */
1100 memset(rz->addr, 0, ring_size);
1102 rxq->rx_ring_dma = rz->iova;
1103 rxq->rx_ring = rz->addr;
1105 /* always reserve more for bulk alloc */
1106 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1108 /* Allocate the software ring. */
1109 rxq->sw_ring = rte_zmalloc_socket(NULL,
1110 sizeof(struct ice_rx_entry) * len,
1111 RTE_CACHE_LINE_SIZE,
1113 if (!rxq->sw_ring) {
1114 ice_rx_queue_release(rxq);
1115 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1119 ice_reset_rx_queue(rxq);
1121 dev->data->rx_queues[queue_idx] = rxq;
1122 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1124 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1126 if (!use_def_burst_func) {
1127 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1128 "satisfied. Rx Burst Bulk Alloc function will be "
1129 "used on port=%d, queue=%d.",
1130 rxq->port_id, rxq->queue_id);
1132 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1133 "not satisfied, Scattered Rx is requested. "
1134 "on port=%d, queue=%d.",
1135 rxq->port_id, rxq->queue_id);
1136 ad->rx_bulk_alloc_allowed = false;
1143 ice_rx_queue_release(void *rxq)
1145 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1148 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1153 rte_free(q->sw_ring);
1158 ice_tx_queue_setup(struct rte_eth_dev *dev,
1161 unsigned int socket_id,
1162 const struct rte_eth_txconf *tx_conf)
1164 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1165 struct ice_vsi *vsi = pf->main_vsi;
1166 struct ice_tx_queue *txq;
1167 const struct rte_memzone *tz;
1169 uint16_t tx_rs_thresh, tx_free_thresh;
1172 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1174 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1175 nb_desc > ICE_MAX_RING_DESC ||
1176 nb_desc < ICE_MIN_RING_DESC) {
1177 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1178 "invalid", nb_desc);
1183 * The following two parameters control the setting of the RS bit on
1184 * transmit descriptors. TX descriptors will have their RS bit set
1185 * after txq->tx_rs_thresh descriptors have been used. The TX
1186 * descriptor ring will be cleaned after txq->tx_free_thresh
1187 * descriptors are used or if the number of descriptors required to
1188 * transmit a packet is greater than the number of free TX descriptors.
1190 * The following constraints must be satisfied:
1191 * - tx_rs_thresh must be greater than 0.
1192 * - tx_rs_thresh must be less than the size of the ring minus 2.
1193 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1194 * - tx_rs_thresh must be a divisor of the ring size.
1195 * - tx_free_thresh must be greater than 0.
1196 * - tx_free_thresh must be less than the size of the ring minus 3.
1197 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1199 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1200 * race condition, hence the maximum threshold constraints. When set
1201 * to zero use default values.
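 * Illustrative example: with nb_desc = 1024, tx_free_thresh = 32 and
 * tx_rs_thresh = 32, every constraint above holds (32 > 0, 32 < 1022,
 * 32 <= 32, 1024 % 32 == 0, 32 < 1021 and 32 + 32 <= 1024), so the RS
 * bit is requested once every 32 descriptors.
 */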
1203 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1204 tx_conf->tx_free_thresh :
1205 ICE_DEFAULT_TX_FREE_THRESH);
1206 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1208 tx_rs_thresh = (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1209 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1210 if (tx_conf->tx_rs_thresh)
1211 tx_rs_thresh = tx_conf->tx_rs_thresh;
1212 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1213 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1214 "exceed nb_desc. (tx_rs_thresh=%u "
1215 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1216 (unsigned int)tx_rs_thresh,
1217 (unsigned int)tx_free_thresh,
1218 (unsigned int)nb_desc,
1219 (int)dev->data->port_id,
1223 if (tx_rs_thresh >= (nb_desc - 2)) {
1224 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1225 "number of TX descriptors minus 2. "
1226 "(tx_rs_thresh=%u port=%d queue=%d)",
1227 (unsigned int)tx_rs_thresh,
1228 (int)dev->data->port_id,
1232 if (tx_free_thresh >= (nb_desc - 3)) {
1233 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1234 "tx_free_thresh must be less than the "
1235 "number of TX descriptors minus 3. "
1236 "(tx_free_thresh=%u port=%d queue=%d)",
1237 (unsigned int)tx_free_thresh,
1238 (int)dev->data->port_id,
1242 if (tx_rs_thresh > tx_free_thresh) {
1243 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1244 "equal to tx_free_thresh. (tx_free_thresh=%u"
1245 " tx_rs_thresh=%u port=%d queue=%d)",
1246 (unsigned int)tx_free_thresh,
1247 (unsigned int)tx_rs_thresh,
1248 (int)dev->data->port_id,
1252 if ((nb_desc % tx_rs_thresh) != 0) {
1253 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1254 "number of TX descriptors. (tx_rs_thresh=%u"
1255 " port=%d queue=%d)",
1256 (unsigned int)tx_rs_thresh,
1257 (int)dev->data->port_id,
1261 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1262 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1263 "tx_rs_thresh is greater than 1. "
1264 "(tx_rs_thresh=%u port=%d queue=%d)",
1265 (unsigned int)tx_rs_thresh,
1266 (int)dev->data->port_id,
1271 /* Free memory if needed. */
1272 if (dev->data->tx_queues[queue_idx]) {
1273 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1274 dev->data->tx_queues[queue_idx] = NULL;
1277 /* Allocate the TX queue data structure. */
1278 txq = rte_zmalloc_socket(NULL,
1279 sizeof(struct ice_tx_queue),
1280 RTE_CACHE_LINE_SIZE,
1283 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1284 "tx queue structure");
1288 /* Allocate TX hardware ring descriptors. */
1289 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1290 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1291 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1292 ring_size, ICE_RING_BASE_ALIGN,
1295 ice_tx_queue_release(txq);
1296 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1300 txq->nb_tx_desc = nb_desc;
1301 txq->tx_rs_thresh = tx_rs_thresh;
1302 txq->tx_free_thresh = tx_free_thresh;
1303 txq->pthresh = tx_conf->tx_thresh.pthresh;
1304 txq->hthresh = tx_conf->tx_thresh.hthresh;
1305 txq->wthresh = tx_conf->tx_thresh.wthresh;
1306 txq->queue_id = queue_idx;
1308 txq->reg_idx = vsi->base_queue + queue_idx;
1309 txq->port_id = dev->data->port_id;
1310 txq->offloads = offloads;
1312 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1314 txq->tx_ring_dma = tz->iova;
1315 txq->tx_ring = tz->addr;
1317 /* Allocate software ring */
1319 txq->sw_ring = rte_zmalloc_socket(NULL,
1320 sizeof(struct ice_tx_entry) * nb_desc,
1321 RTE_CACHE_LINE_SIZE,
1323 if (!txq->sw_ring) {
1324 ice_tx_queue_release(txq);
1325 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1329 ice_reset_tx_queue(txq);
1331 dev->data->tx_queues[queue_idx] = txq;
1332 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1333 ice_set_tx_function_flag(dev, txq);
1339 ice_tx_queue_release(void *txq)
1341 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1344 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1349 rte_free(q->sw_ring);
1354 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1355 struct rte_eth_rxq_info *qinfo)
1357 struct ice_rx_queue *rxq;
1359 rxq = dev->data->rx_queues[queue_id];
1361 qinfo->mp = rxq->mp;
1362 qinfo->scattered_rx = dev->data->scattered_rx;
1363 qinfo->nb_desc = rxq->nb_rx_desc;
1365 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1366 qinfo->conf.rx_drop_en = rxq->drop_en;
1367 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1371 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1372 struct rte_eth_txq_info *qinfo)
1374 struct ice_tx_queue *txq;
1376 txq = dev->data->tx_queues[queue_id];
1378 qinfo->nb_desc = txq->nb_tx_desc;
1380 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1381 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1382 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1384 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1385 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1386 qinfo->conf.offloads = txq->offloads;
1387 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1391 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1393 #define ICE_RXQ_SCAN_INTERVAL 4
1394 volatile union ice_rx_flex_desc *rxdp;
1395 struct ice_rx_queue *rxq;
1398 rxq = dev->data->rx_queues[rx_queue_id];
1399 rxdp = &rxq->rx_ring[rxq->rx_tail];
1400 while ((desc < rxq->nb_rx_desc) &&
1401 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1402 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1404 * Check the DD bit of one Rx descriptor in each group of 4,
1405 * to avoid checking too frequently and degrading performance.
1408 desc += ICE_RXQ_SCAN_INTERVAL;
1409 rxdp += ICE_RXQ_SCAN_INTERVAL;
1410 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1411 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1412 desc - rxq->nb_rx_desc]);
1418 #define ICE_RX_FLEX_ERR0_BITS \
1419 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1420 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1421 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1422 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1423 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1424 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1426 /* Rx L3/L4 checksum */
1427 static inline uint64_t
1428 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1432 /* check if HW has decoded the packet and checksum */
1433 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1436 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1437 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1441 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1442 flags |= PKT_RX_IP_CKSUM_BAD;
1444 flags |= PKT_RX_IP_CKSUM_GOOD;
1446 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1447 flags |= PKT_RX_L4_CKSUM_BAD;
1449 flags |= PKT_RX_L4_CKSUM_GOOD;
1451 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1452 flags |= PKT_RX_EIP_CKSUM_BAD;
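/*
 * Illustrative decode (sketch, not driver code): a status word with
 * L3L4P set, XSUM_IPE set and XSUM_L4E clear yields
 * PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD, while a status word with
 * none of the ICE_RX_FLEX_ERR0_BITS set takes the fast path above and
 * reports both checksums as good.
 */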
1458 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1460 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1461 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1462 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1464 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1465 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1466 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1471 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1472 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1473 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1474 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1475 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1476 mb->vlan_tci_outer = mb->vlan_tci;
1477 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1478 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1479 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1480 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1482 mb->vlan_tci_outer = 0;
1485 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1486 mb->vlan_tci, mb->vlan_tci_outer);
1489 #define ICE_LOOK_AHEAD 8
1490 #if (ICE_LOOK_AHEAD != 8)
1491 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1494 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1496 volatile union ice_rx_flex_desc *rxdp;
1497 struct ice_rx_entry *rxep;
1498 struct rte_mbuf *mb;
1501 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1502 int32_t i, j, nb_rx = 0;
1503 uint64_t pkt_flags = 0;
1504 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1506 rxdp = &rxq->rx_ring[rxq->rx_tail];
1507 rxep = &rxq->sw_ring[rxq->rx_tail];
1509 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1511 /* Make sure there is at least 1 packet to receive */
1512 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1516 * Scan LOOK_AHEAD descriptors at a time to determine which
1517 * descriptors reference packets that are ready to be received.
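 * Illustrative walk-through: with ICE_LOOK_AHEAD = 8, each iteration of
 * the loop below reads the eight status words of a group back to front,
 * counts how many have the DD bit set, and the scan stops at the first
 * group that is not completely done.
 */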
1519 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1520 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1521 /* Read desc statuses backwards to avoid race condition */
1522 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1523 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1527 /* Compute how many status bits were set */
1528 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1529 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1533 /* Translate descriptor info to mbuf parameters */
1534 for (j = 0; j < nb_dd; j++) {
1536 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1537 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1538 mb->data_len = pkt_len;
1539 mb->pkt_len = pkt_len;
1541 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1542 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1543 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1544 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1545 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1546 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1548 mb->ol_flags |= pkt_flags;
1551 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1552 rxq->rx_stage[i + j] = rxep[j].mbuf;
1554 if (nb_dd != ICE_LOOK_AHEAD)
1558 /* Clear software ring entries */
1559 for (i = 0; i < nb_rx; i++)
1560 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1562 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1563 "port_id=%u, queue_id=%u, nb_rx=%d",
1564 rxq->port_id, rxq->queue_id, nb_rx);
1569 static inline uint16_t
1570 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1571 struct rte_mbuf **rx_pkts,
1575 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1577 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1579 for (i = 0; i < nb_pkts; i++)
1580 rx_pkts[i] = stage[i];
1582 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1583 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1589 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1591 volatile union ice_rx_flex_desc *rxdp;
1592 struct ice_rx_entry *rxep;
1593 struct rte_mbuf *mb;
1594 uint16_t alloc_idx, i;
1598 /* Allocate buffers in bulk */
1599 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1600 (rxq->rx_free_thresh - 1));
1601 rxep = &rxq->sw_ring[alloc_idx];
1602 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1603 rxq->rx_free_thresh);
1604 if (unlikely(diag != 0)) {
1605 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1609 rxdp = &rxq->rx_ring[alloc_idx];
1610 for (i = 0; i < rxq->rx_free_thresh; i++) {
1611 if (likely(i < (rxq->rx_free_thresh - 1)))
1612 /* Prefetch next mbuf */
1613 rte_prefetch0(rxep[i + 1].mbuf);
1616 rte_mbuf_refcnt_set(mb, 1);
1618 mb->data_off = RTE_PKTMBUF_HEADROOM;
1620 mb->port = rxq->port_id;
1621 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1622 rxdp[i].read.hdr_addr = 0;
1623 rxdp[i].read.pkt_addr = dma_addr;
1626 /* Update the Rx tail register */
1627 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1629 rxq->rx_free_trigger =
1630 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1631 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1632 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1637 static inline uint16_t
1638 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1640 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1642 struct rte_eth_dev *dev;
1647 if (rxq->rx_nb_avail)
1648 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1650 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1651 rxq->rx_next_avail = 0;
1652 rxq->rx_nb_avail = nb_rx;
1653 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1655 if (rxq->rx_tail > rxq->rx_free_trigger) {
1656 if (ice_rx_alloc_bufs(rxq) != 0) {
1659 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1660 dev->data->rx_mbuf_alloc_failed +=
1661 rxq->rx_free_thresh;
1662 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1663 "port_id=%u, queue_id=%u",
1664 rxq->port_id, rxq->queue_id);
1665 rxq->rx_nb_avail = 0;
1666 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1667 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1668 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1674 if (rxq->rx_tail >= rxq->nb_rx_desc)
1677 if (rxq->rx_nb_avail)
1678 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1684 ice_recv_pkts_bulk_alloc(void *rx_queue,
1685 struct rte_mbuf **rx_pkts,
1692 if (unlikely(nb_pkts == 0))
1695 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1696 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1699 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1700 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1701 nb_rx = (uint16_t)(nb_rx + count);
1702 nb_pkts = (uint16_t)(nb_pkts - count);
1711 ice_recv_scattered_pkts(void *rx_queue,
1712 struct rte_mbuf **rx_pkts,
1715 struct ice_rx_queue *rxq = rx_queue;
1716 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1717 volatile union ice_rx_flex_desc *rxdp;
1718 union ice_rx_flex_desc rxd;
1719 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1720 struct ice_rx_entry *rxe;
1721 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1722 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1723 struct rte_mbuf *nmb; /* newly allocated mbuf */
1724 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1725 uint16_t rx_id = rxq->rx_tail;
1727 uint16_t nb_hold = 0;
1728 uint16_t rx_packet_len;
1729 uint16_t rx_stat_err0;
1732 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1733 struct rte_eth_dev *dev;
1735 while (nb_rx < nb_pkts) {
1736 rxdp = &rx_ring[rx_id];
1737 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1739 /* Check the DD bit first */
1740 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1744 nmb = rte_mbuf_raw_alloc(rxq->mp);
1745 if (unlikely(!nmb)) {
1746 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1747 dev->data->rx_mbuf_alloc_failed++;
1750 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1753 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1755 if (unlikely(rx_id == rxq->nb_rx_desc))
1758 /* Prefetch next mbuf */
1759 rte_prefetch0(sw_ring[rx_id].mbuf);
1762 * When next RX descriptor is on a cache line boundary,
1763 * prefetch the next 4 RX descriptors and next 8 pointers
1766 if ((rx_id & 0x3) == 0) {
1767 rte_prefetch0(&rx_ring[rx_id]);
1768 rte_prefetch0(&sw_ring[rx_id]);
1774 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1776 /* Set data buffer address and data length of the mbuf */
1777 rxdp->read.hdr_addr = 0;
1778 rxdp->read.pkt_addr = dma_addr;
1779 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1780 ICE_RX_FLX_DESC_PKT_LEN_M;
1781 rxm->data_len = rx_packet_len;
1782 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1785 * If this is the first buffer of the received packet, set the
1786 * pointer to the first mbuf of the packet and initialize its
1787 * context. Otherwise, update the total length and the number
1788 * of segments of the current scattered packet, and update the
1789 * pointer to the last mbuf of the current packet.
1793 first_seg->nb_segs = 1;
1794 first_seg->pkt_len = rx_packet_len;
1796 first_seg->pkt_len =
1797 (uint16_t)(first_seg->pkt_len + rx_packet_len);
1799 first_seg->nb_segs++;
1800 last_seg->next = rxm;
1804 * If this is not the last buffer of the received packet,
1805 * update the pointer to the last mbuf of the current scattered
1806 * packet and continue to parse the RX ring.
1808 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1814 * This is the last buffer of the received packet. If the CRC
1815 * is not stripped by the hardware:
1816 * - Subtract the CRC length from the total packet length.
1817 * - If the last buffer only contains the whole CRC or a part
1818 * of it, free the mbuf associated to the last buffer. If part
1819 * of the CRC is also contained in the previous mbuf, subtract
1820 * the length of that CRC part from the data length of the previous mbuf.
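 * Illustrative example: with a 4-byte CRC left in place by hardware and
 * a final buffer holding only rx_packet_len = 2 bytes, the last mbuf is
 * freed, nb_segs is decremented, and the previous segment's data_len is
 * reduced by the remaining 4 - 2 = 2 CRC bytes.
 */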
1824 if (unlikely(rxq->crc_len > 0)) {
1825 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1826 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1827 rte_pktmbuf_free_seg(rxm);
1828 first_seg->nb_segs--;
1829 last_seg->data_len =
1830 (uint16_t)(last_seg->data_len -
1831 (RTE_ETHER_CRC_LEN - rx_packet_len));
1832 last_seg->next = NULL;
1834 rxm->data_len = (uint16_t)(rx_packet_len - RTE_ETHER_CRC_LEN);
1838 first_seg->port = rxq->port_id;
1839 first_seg->ol_flags = 0;
1840 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1841 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1842 ice_rxd_to_vlan_tci(first_seg, &rxd);
1843 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1844 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1845 first_seg->ol_flags |= pkt_flags;
1846 /* Prefetch data of first segment, if configured to do so. */
1847 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1848 first_seg->data_off));
1849 rx_pkts[nb_rx++] = first_seg;
1853 /* Record index of the next RX descriptor to probe. */
1854 rxq->rx_tail = rx_id;
1855 rxq->pkt_first_seg = first_seg;
1856 rxq->pkt_last_seg = last_seg;
1859 * If the number of free RX descriptors is greater than the RX free
1860 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1861 * register. Update the RDT with the value of the last processed RX
1862 * descriptor minus 1, to guarantee that the RDT register is never
1863 * equal to the RDH register, which creates a "full" ring situtation
1864 * from the hardware point of view.
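 * Illustrative example: with rx_free_thresh = 32, the tail register is
 * written at most once for every 32 or more descriptors returned to
 * hardware, which keeps MMIO writes off the per-packet hot path.
 */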
1866 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1867 if (nb_hold > rxq->rx_free_thresh) {
1868 rx_id = (uint16_t)(rx_id == 0 ?
1869 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1870 /* write TAIL register */
1871 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1874 rxq->nb_rx_hold = nb_hold;
1876 /* return received packet in the burst */
1881 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1883 struct ice_adapter *ad =
1884 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1885 const uint32_t *ptypes;
1887 static const uint32_t ptypes_os[] = {
1888 /* refers to ice_get_default_pkt_type() */
1890 RTE_PTYPE_L2_ETHER_TIMESYNC,
1891 RTE_PTYPE_L2_ETHER_LLDP,
1892 RTE_PTYPE_L2_ETHER_ARP,
1893 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1894 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1897 RTE_PTYPE_L4_NONFRAG,
1901 RTE_PTYPE_TUNNEL_GRENAT,
1902 RTE_PTYPE_TUNNEL_IP,
1903 RTE_PTYPE_INNER_L2_ETHER,
1904 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1905 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1906 RTE_PTYPE_INNER_L4_FRAG,
1907 RTE_PTYPE_INNER_L4_ICMP,
1908 RTE_PTYPE_INNER_L4_NONFRAG,
1909 RTE_PTYPE_INNER_L4_SCTP,
1910 RTE_PTYPE_INNER_L4_TCP,
1911 RTE_PTYPE_INNER_L4_UDP,
1915 static const uint32_t ptypes_comms[] = {
1916 /* refers to ice_get_default_pkt_type() */
1918 RTE_PTYPE_L2_ETHER_TIMESYNC,
1919 RTE_PTYPE_L2_ETHER_LLDP,
1920 RTE_PTYPE_L2_ETHER_ARP,
1921 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1922 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1925 RTE_PTYPE_L4_NONFRAG,
1929 RTE_PTYPE_TUNNEL_GRENAT,
1930 RTE_PTYPE_TUNNEL_IP,
1931 RTE_PTYPE_INNER_L2_ETHER,
1932 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1933 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1934 RTE_PTYPE_INNER_L4_FRAG,
1935 RTE_PTYPE_INNER_L4_ICMP,
1936 RTE_PTYPE_INNER_L4_NONFRAG,
1937 RTE_PTYPE_INNER_L4_SCTP,
1938 RTE_PTYPE_INNER_L4_TCP,
1939 RTE_PTYPE_INNER_L4_UDP,
1940 RTE_PTYPE_TUNNEL_GTPC,
1941 RTE_PTYPE_TUNNEL_GTPU,
1942 RTE_PTYPE_L2_ETHER_PPPOE,
1946 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1947 ptypes = ptypes_comms;
1951 if (dev->rx_pkt_burst == ice_recv_pkts ||
1952 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1953 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1957 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1958 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1959 #ifdef CC_AVX512_SUPPORT
1960 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
1961 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
1963 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1964 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1972 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1974 volatile union ice_rx_flex_desc *rxdp;
1975 struct ice_rx_queue *rxq = rx_queue;
1978 if (unlikely(offset >= rxq->nb_rx_desc))
1981 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1982 return RTE_ETH_RX_DESC_UNAVAIL;
1984 desc = rxq->rx_tail + offset;
1985 if (desc >= rxq->nb_rx_desc)
1986 desc -= rxq->nb_rx_desc;
1988 rxdp = &rxq->rx_ring[desc];
1989 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1990 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1991 return RTE_ETH_RX_DESC_DONE;
1993 return RTE_ETH_RX_DESC_AVAIL;
1997 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1999 struct ice_tx_queue *txq = tx_queue;
2000 volatile uint64_t *status;
2001 uint64_t mask, expect;
2004 if (unlikely(offset >= txq->nb_tx_desc))
2007 desc = txq->tx_tail + offset;
2008 /* go to next desc that has the RS bit */
2009 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2011 if (desc >= txq->nb_tx_desc) {
2012 desc -= txq->nb_tx_desc;
2013 if (desc >= txq->nb_tx_desc)
2014 desc -= txq->nb_tx_desc;
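/* Illustrative example of the rounding above: with tx_rs_thresh = 32 and
 * desc = 40, desc is rounded up to 64, the next multiple of tx_rs_thresh,
 * and (after the ring-wrap adjustment) that descriptor's DTYPE field is
 * what gets inspected below.
 */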
2017 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2018 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2019 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2020 ICE_TXD_QW1_DTYPE_S);
2021 if ((*status & mask) == expect)
2022 return RTE_ETH_TX_DESC_DONE;
2024 return RTE_ETH_TX_DESC_FULL;
2028 ice_free_queues(struct rte_eth_dev *dev)
2032 PMD_INIT_FUNC_TRACE();
2034 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2035 if (!dev->data->rx_queues[i])
2037 ice_rx_queue_release(dev->data->rx_queues[i]);
2038 dev->data->rx_queues[i] = NULL;
2039 rte_eth_dma_zone_free(dev, "rx_ring", i);
2041 dev->data->nb_rx_queues = 0;
2043 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2044 if (!dev->data->tx_queues[i])
2046 ice_tx_queue_release(dev->data->tx_queues[i]);
2047 dev->data->tx_queues[i] = NULL;
2048 rte_eth_dma_zone_free(dev, "tx_ring", i);
2050 dev->data->nb_tx_queues = 0;
2053 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2054 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
2057 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2059 struct ice_tx_queue *txq;
2060 const struct rte_memzone *tz = NULL;
2062 struct rte_eth_dev *dev;
2065 PMD_DRV_LOG(ERR, "PF is not available");
2069 dev = pf->adapter->eth_dev;
2071 /* Allocate the TX queue data structure. */
2072 txq = rte_zmalloc_socket("ice fdir tx queue",
2073 sizeof(struct ice_tx_queue),
2074 RTE_CACHE_LINE_SIZE,
2077 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2078 "tx queue structure.");
2082 /* Allocate TX hardware ring descriptors. */
2083 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2084 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2086 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2087 ICE_FDIR_QUEUE_ID, ring_size,
2088 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2090 ice_tx_queue_release(txq);
2091 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2095 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2096 txq->queue_id = ICE_FDIR_QUEUE_ID;
2097 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2098 txq->vsi = pf->fdir.fdir_vsi;
2100 txq->tx_ring_dma = tz->iova;
2101 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2103 * No need to allocate a software ring or to reset the FDIR program
2104 * queue; just mark the queue as configured.
2109 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2115 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2117 struct ice_rx_queue *rxq;
2118 const struct rte_memzone *rz = NULL;
2120 struct rte_eth_dev *dev;
2123 PMD_DRV_LOG(ERR, "PF is not available");
2127 dev = pf->adapter->eth_dev;
2129 /* Allocate the RX queue data structure. */
2130 rxq = rte_zmalloc_socket("ice fdir rx queue",
2131 sizeof(struct ice_rx_queue),
2132 RTE_CACHE_LINE_SIZE,
2135 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2136 "rx queue structure.");
2140 /* Allocate RX hardware ring descriptors. */
2141 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2142 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2144 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2145 ICE_FDIR_QUEUE_ID, ring_size,
2146 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2148 ice_rx_queue_release(rxq);
2149 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2153 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2154 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2155 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2156 rxq->vsi = pf->fdir.fdir_vsi;
2158 rxq->rx_ring_dma = rz->iova;
2159 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2160 sizeof(union ice_32byte_rx_desc));
2161 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2164 * No need to allocate a software ring or to reset the FDIR Rx
2165 * queue; just mark the queue as configured.
2170 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2176 ice_recv_pkts(void *rx_queue,
2177 struct rte_mbuf **rx_pkts,
2180 struct ice_rx_queue *rxq = rx_queue;
2181 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2182 volatile union ice_rx_flex_desc *rxdp;
2183 union ice_rx_flex_desc rxd;
2184 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2185 struct ice_rx_entry *rxe;
2186 struct rte_mbuf *nmb; /* newly allocated mbuf */
2187 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2188 uint16_t rx_id = rxq->rx_tail;
2190 uint16_t nb_hold = 0;
2191 uint16_t rx_packet_len;
2192 uint16_t rx_stat_err0;
2195 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2196 struct rte_eth_dev *dev;
2198 while (nb_rx < nb_pkts) {
2199 rxdp = &rx_ring[rx_id];
2200 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2202 /* Check the DD bit first */
2203 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2207 nmb = rte_mbuf_raw_alloc(rxq->mp);
2208 if (unlikely(!nmb)) {
2209 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2210 dev->data->rx_mbuf_alloc_failed++;
2213 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2216 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2218 if (unlikely(rx_id == rxq->nb_rx_desc))
2223 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2226 * fill the read format of the descriptor with the physical
2227 * address of the newly allocated mbuf: nmb
2229 rxdp->read.hdr_addr = 0;
2230 rxdp->read.pkt_addr = dma_addr;
2232 /* calculate rx_packet_len of the received pkt */
2233 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2234 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2236 /* fill old mbuf with received descriptor: rxd */
2237 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2238 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2241 rxm->pkt_len = rx_packet_len;
2242 rxm->data_len = rx_packet_len;
2243 rxm->port = rxq->port_id;
2244 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2245 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
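		/* The hardware ptype from the descriptor above is translated to
		 * the RTE_PTYPE_* encoding through the adapter's lookup table
		 * (see ice_get_default_pkt_type() below).
		 */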
2246 ice_rxd_to_vlan_tci(rxm, &rxd);
2247 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2248 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2249 rxm->ol_flags |= pkt_flags;
2250 /* copy old mbuf to rx_pkts */
2251 rx_pkts[nb_rx++] = rxm;
2253 rxq->rx_tail = rx_id;
2255 * If the number of free RX descriptors is greater than the RX free
2256 	 * threshold of the queue, advance the receive tail register of the queue.
2257 * Update that register with the value of the last processed RX
2258 * descriptor minus 1.
2260 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2261 if (nb_hold > rxq->rx_free_thresh) {
2262 rx_id = (uint16_t)(rx_id == 0 ?
2263 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2264 /* write TAIL register */
2265 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2268 rxq->nb_rx_hold = nb_hold;
2270 	/* return the number of packets received in the burst */
2275 ice_parse_tunneling_params(uint64_t ol_flags,
2276 union ice_tx_offload tx_offload,
2277 uint32_t *cd_tunneling)
2279 /* EIPT: External (outer) IP header type */
2280 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2281 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2282 else if (ol_flags & PKT_TX_OUTER_IPV4)
2283 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2284 else if (ol_flags & PKT_TX_OUTER_IPV6)
2285 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2287 /* EIPLEN: External (outer) IP header length, in DWords */
2288 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2289 ICE_TXD_CTX_QW0_EIPLEN_S;
2291 /* L4TUNT: L4 Tunneling Type */
2292 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2293 case PKT_TX_TUNNEL_IPIP:
2294 /* for non UDP / GRE tunneling, set to 00b */
2296 case PKT_TX_TUNNEL_VXLAN:
2297 case PKT_TX_TUNNEL_GTP:
2298 case PKT_TX_TUNNEL_GENEVE:
2299 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2301 case PKT_TX_TUNNEL_GRE:
2302 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2305 PMD_TX_LOG(ERR, "Tunnel type not supported");
2309 /* L4TUNLEN: L4 Tunneling Length, in Words
2311 	 * We depend on the app to set rte_mbuf.l2_len correctly.
2312 	 * For IP in GRE it should be set to the length of the GRE header.
2314 	 * For MAC in GRE or MAC in UDP it should be set to the length
2315 	 * of the GRE or UDP headers plus the inner MAC up to and including
2316 	 * its last Ethertype.
2317 	 * If MPLS labels exist, they should be included as well.
2319 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2320 ICE_TXD_CTX_QW0_NATLEN_S;
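	/* The NATLEN field is programmed in 2-byte words, hence the shift right by one. */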
2322 if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2323 (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2324 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2325 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2329 ice_txd_enable_checksum(uint64_t ol_flags,
2330 			uint32_t *td_cmd,
2331 			uint32_t *td_offset,
2332 union ice_tx_offload tx_offload)
2335 if (ol_flags & PKT_TX_TUNNEL_MASK)
2336 *td_offset |= (tx_offload.outer_l2_len >> 1)
2337 << ICE_TX_DESC_LEN_MACLEN_S;
2339 *td_offset |= (tx_offload.l2_len >> 1)
2340 << ICE_TX_DESC_LEN_MACLEN_S;
2342 /* Enable L3 checksum offloads */
2343 if (ol_flags & PKT_TX_IP_CKSUM) {
2344 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2345 *td_offset |= (tx_offload.l3_len >> 2) <<
2346 ICE_TX_DESC_LEN_IPLEN_S;
2347 } else if (ol_flags & PKT_TX_IPV4) {
2348 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2349 *td_offset |= (tx_offload.l3_len >> 2) <<
2350 ICE_TX_DESC_LEN_IPLEN_S;
2351 } else if (ol_flags & PKT_TX_IPV6) {
2352 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2353 *td_offset |= (tx_offload.l3_len >> 2) <<
2354 ICE_TX_DESC_LEN_IPLEN_S;
2357 if (ol_flags & PKT_TX_TCP_SEG) {
2358 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2359 *td_offset |= (tx_offload.l4_len >> 2) <<
2360 ICE_TX_DESC_LEN_L4_LEN_S;
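	/* For TSO the L4 length above comes from the mbuf's l4_len; for the
	 * plain checksum offloads below it is the fixed header size of each
	 * protocol.
	 */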
2364 /* Enable L4 checksum offloads */
2365 switch (ol_flags & PKT_TX_L4_MASK) {
2366 case PKT_TX_TCP_CKSUM:
2367 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2368 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2369 ICE_TX_DESC_LEN_L4_LEN_S;
2371 case PKT_TX_SCTP_CKSUM:
2372 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2373 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2374 ICE_TX_DESC_LEN_L4_LEN_S;
2376 case PKT_TX_UDP_CKSUM:
2377 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2378 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2379 ICE_TX_DESC_LEN_L4_LEN_S;
2387 ice_xmit_cleanup(struct ice_tx_queue *txq)
2389 struct ice_tx_entry *sw_ring = txq->sw_ring;
2390 volatile struct ice_tx_desc *txd = txq->tx_ring;
2391 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2392 uint16_t nb_tx_desc = txq->nb_tx_desc;
2393 uint16_t desc_to_clean_to;
2394 uint16_t nb_tx_to_clean;
2396 /* Determine the last descriptor needing to be cleaned */
2397 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2398 if (desc_to_clean_to >= nb_tx_desc)
2399 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2401 /* Check to make sure the last descriptor to clean is done */
2402 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2403 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2404 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2405 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2406 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2408 txq->port_id, txq->queue_id,
2409 txd[desc_to_clean_to].cmd_type_offset_bsz);
2410 /* Failed to clean any descriptors */
2414 /* Figure out how many descriptors will be cleaned */
2415 if (last_desc_cleaned > desc_to_clean_to)
2416 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2419 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2422 /* The last descriptor to clean is done, so that means all the
2423 * descriptors from the last descriptor that was cleaned
2424 * up to the last descriptor with the RS bit set
2425 * are done. Only reset the threshold descriptor.
2427 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2429 /* Update the txq to reflect the last descriptor that was cleaned */
2430 txq->last_desc_cleaned = desc_to_clean_to;
2431 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2436 /* Construct the tx flags */
2437 static inline uint64_t
2438 ice_build_ctob(uint32_t td_cmd,
2443 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2444 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2445 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2446 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2447 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2450 /* Check if the context descriptor is needed for TX offloading */
2451 static inline uint16_t
2452 ice_calc_context_desc(uint64_t flags)
2454 static uint64_t mask = PKT_TX_TCP_SEG |
2456 PKT_TX_OUTER_IP_CKSUM |
2459 return (flags & mask) ? 1 : 0;
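	/* Any of the offload flags in the mask above consumes one extra
	 * context descriptor ahead of the packet's data descriptors.
	 */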
2462 /* set ice TSO context descriptor */
2463 static inline uint64_t
2464 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2466 uint64_t ctx_desc = 0;
2467 uint32_t cd_cmd, hdr_len, cd_tso_len;
2469 if (!tx_offload.l4_len) {
2470 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2474 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2475 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2476 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2478 cd_cmd = ICE_TX_CTX_DESC_TSO;
2479 cd_tso_len = mbuf->pkt_len - hdr_len;
2480 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2481 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2482 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
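	/* cd_tso_len is the TSO payload length, i.e. the packet length minus
	 * all (outer and inner) headers; tso_segsz is the MSS programmed for
	 * each resulting segment.
	 */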
2487 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2488 #define ICE_MAX_DATA_PER_TXD \
2489 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
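/* This works out to 16383 bytes (16K - 1), the most data a single Tx data
 * descriptor can carry.
 */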
2490 /* Calculate the number of TX descriptors needed for each pkt */
2491 static inline uint16_t
2492 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2494 struct rte_mbuf *txd = tx_pkt;
2497 while (txd != NULL) {
2498 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
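		/* For example, a 45000-byte segment needs
		 * DIV_ROUND_UP(45000, 16383) = 3 data descriptors.
		 */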
2506 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2508 struct ice_tx_queue *txq;
2509 volatile struct ice_tx_desc *tx_ring;
2510 volatile struct ice_tx_desc *txd;
2511 struct ice_tx_entry *sw_ring;
2512 struct ice_tx_entry *txe, *txn;
2513 struct rte_mbuf *tx_pkt;
2514 struct rte_mbuf *m_seg;
2515 uint32_t cd_tunneling_params;
2520 uint32_t td_cmd = 0;
2521 uint32_t td_offset = 0;
2522 uint32_t td_tag = 0;
2525 uint64_t buf_dma_addr;
2527 union ice_tx_offload tx_offload = {0};
2530 sw_ring = txq->sw_ring;
2531 tx_ring = txq->tx_ring;
2532 tx_id = txq->tx_tail;
2533 txe = &sw_ring[tx_id];
2535 /* Check if the descriptor ring needs to be cleaned. */
2536 if (txq->nb_tx_free < txq->tx_free_thresh)
2537 (void)ice_xmit_cleanup(txq);
2539 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2540 tx_pkt = *tx_pkts++;
2545 ol_flags = tx_pkt->ol_flags;
2546 tx_offload.l2_len = tx_pkt->l2_len;
2547 tx_offload.l3_len = tx_pkt->l3_len;
2548 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2549 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2550 tx_offload.l4_len = tx_pkt->l4_len;
2551 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2552 /* Calculate the number of context descriptors needed. */
2553 nb_ctx = ice_calc_context_desc(ol_flags);
2555 		/* The number of descriptors that must be allocated for
2556 		 * a packet equals the number of segments of that packet
2557 		 * plus one context descriptor, if one is needed.
2558 		 * Recalculate the needed tx descriptors when TSO is enabled, in case
2559 		 * the mbuf data size exceeds the maximum data size that the hw allows
2562 if (ol_flags & PKT_TX_TCP_SEG)
2563 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2566 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2567 tx_last = (uint16_t)(tx_id + nb_used - 1);
2570 if (tx_last >= txq->nb_tx_desc)
2571 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2573 if (nb_used > txq->nb_tx_free) {
2574 if (ice_xmit_cleanup(txq) != 0) {
2579 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2580 while (nb_used > txq->nb_tx_free) {
2581 if (ice_xmit_cleanup(txq) != 0) {
2590 /* Descriptor based VLAN insertion */
2591 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2592 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2593 td_tag = tx_pkt->vlan_tci;
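		/* The tag above is inserted from L2TAG1 of the data descriptor;
		 * for QinQ the outer tag is carried in L2TAG2 of the context
		 * descriptor set up below.
		 */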
2596 /* Fill in tunneling parameters if necessary */
2597 cd_tunneling_params = 0;
2598 if (ol_flags & PKT_TX_TUNNEL_MASK)
2599 ice_parse_tunneling_params(ol_flags, tx_offload,
2600 &cd_tunneling_params);
2602 /* Enable checksum offloading */
2603 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2604 ice_txd_enable_checksum(ol_flags, &td_cmd,
2605 &td_offset, tx_offload);
2608 /* Setup TX context descriptor if required */
2609 volatile struct ice_tx_ctx_desc *ctx_txd =
2610 (volatile struct ice_tx_ctx_desc *)
2612 uint16_t cd_l2tag2 = 0;
2613 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2615 txn = &sw_ring[txe->next_id];
2616 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2618 rte_pktmbuf_free_seg(txe->mbuf);
2622 if (ol_flags & PKT_TX_TCP_SEG)
2623 cd_type_cmd_tso_mss |=
2624 ice_set_tso_ctx(tx_pkt, tx_offload);
2626 ctx_txd->tunneling_params =
2627 rte_cpu_to_le_32(cd_tunneling_params);
2629 /* TX context descriptor based double VLAN insert */
2630 if (ol_flags & PKT_TX_QINQ) {
2631 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2632 cd_type_cmd_tso_mss |=
2633 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2634 ICE_TXD_CTX_QW1_CMD_S);
2636 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2638 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2640 txe->last_id = tx_last;
2641 tx_id = txe->next_id;
2647 txd = &tx_ring[tx_id];
2648 txn = &sw_ring[txe->next_id];
2651 rte_pktmbuf_free_seg(txe->mbuf);
2654 /* Setup TX Descriptor */
2655 slen = m_seg->data_len;
2656 buf_dma_addr = rte_mbuf_data_iova(m_seg);
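			/* A single Tx data descriptor can hold at most
			 * ICE_MAX_DATA_PER_TXD bytes, so for TSO an oversized
			 * segment is split across several descriptors below.
			 */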
2658 while ((ol_flags & PKT_TX_TCP_SEG) &&
2659 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2660 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2661 txd->cmd_type_offset_bsz =
2662 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2663 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2664 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2665 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2666 ICE_TXD_QW1_TX_BUF_SZ_S) |
2667 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2669 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2670 slen -= ICE_MAX_DATA_PER_TXD;
2672 txe->last_id = tx_last;
2673 tx_id = txe->next_id;
2675 txd = &tx_ring[tx_id];
2676 txn = &sw_ring[txe->next_id];
2679 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2680 txd->cmd_type_offset_bsz =
2681 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2682 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2683 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2684 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2685 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2687 txe->last_id = tx_last;
2688 tx_id = txe->next_id;
2690 m_seg = m_seg->next;
2693 /* fill the last descriptor with End of Packet (EOP) bit */
2694 td_cmd |= ICE_TX_DESC_CMD_EOP;
2695 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2696 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2698 /* set RS bit on the last descriptor of one packet */
2699 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2700 PMD_TX_FREE_LOG(DEBUG,
2701 "Setting RS bit on TXD id="
2702 "%4u (port=%d queue=%d)",
2703 tx_last, txq->port_id, txq->queue_id);
2705 td_cmd |= ICE_TX_DESC_CMD_RS;
2707 /* Update txq RS bit counters */
2708 txq->nb_tx_used = 0;
2710 txd->cmd_type_offset_bsz |=
2711 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2715 /* update Tail register */
2716 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2717 txq->tx_tail = tx_id;
2722 static __rte_always_inline int
2723 ice_tx_free_bufs(struct ice_tx_queue *txq)
2725 struct ice_tx_entry *txep;
2728 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2729 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2730 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2733 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
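	/* The descriptor at tx_next_dd carries the RS bit; once it reports
	 * done, the whole batch of tx_rs_thresh buffers ending there can be
	 * freed.
	 */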
2735 for (i = 0; i < txq->tx_rs_thresh; i++)
2736 rte_prefetch0((txep + i)->mbuf);
2738 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2739 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2740 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2744 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2745 rte_pktmbuf_free_seg(txep->mbuf);
2750 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2751 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2752 if (txq->tx_next_dd >= txq->nb_tx_desc)
2753 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2755 return txq->tx_rs_thresh;
2759 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2762 struct ice_tx_entry *swr_ring = txq->sw_ring;
2763 uint16_t i, tx_last, tx_id;
2764 uint16_t nb_tx_free_last;
2765 uint16_t nb_tx_to_clean;
2768 	/* Start freeing mbufs from the descriptor after tx_tail */
2769 tx_last = txq->tx_tail;
2770 tx_id = swr_ring[tx_last].next_id;
2772 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2775 nb_tx_to_clean = txq->nb_tx_free;
2776 nb_tx_free_last = txq->nb_tx_free;
2778 free_cnt = txq->nb_tx_desc;
2780 	/* Loop through swr_ring to count the number of
2781 	 * freeable mbufs and packets.
2783 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2784 for (i = 0; i < nb_tx_to_clean &&
2785 pkt_cnt < free_cnt &&
2786 tx_id != tx_last; i++) {
2787 if (swr_ring[tx_id].mbuf != NULL) {
2788 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2789 swr_ring[tx_id].mbuf = NULL;
2792 				/* If this is the last segment in the packet,
2793 				 * increment the packet count */
2795 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2798 tx_id = swr_ring[tx_id].next_id;
2801 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2802 txq->nb_tx_free || tx_id == tx_last)
2805 if (pkt_cnt < free_cnt) {
2806 if (ice_xmit_cleanup(txq))
2809 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2810 nb_tx_free_last = txq->nb_tx_free;
2814 return (int)pkt_cnt;
2819 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2820 uint32_t free_cnt __rte_unused)
2827 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2832 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2833 free_cnt = txq->nb_tx_desc;
2835 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2837 for (i = 0; i < cnt; i += n) {
2838 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2841 n = ice_tx_free_bufs(txq);
2851 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2853 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2854 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2855 struct ice_adapter *ad =
2856 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2859 if (ad->tx_vec_allowed)
2860 return ice_tx_done_cleanup_vec(q, free_cnt);
2862 if (ad->tx_simple_allowed)
2863 return ice_tx_done_cleanup_simple(q, free_cnt);
2865 return ice_tx_done_cleanup_full(q, free_cnt);
2868 /* Populate 4 descriptors with data from 4 mbufs */
2870 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2875 for (i = 0; i < 4; i++, txdp++, pkts++) {
2876 dma_addr = rte_mbuf_data_iova(*pkts);
2877 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2878 txdp->cmd_type_offset_bsz =
2879 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2880 (*pkts)->data_len, 0);
2884 /* Populate 1 descriptor with data from 1 mbuf */
2886 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2890 dma_addr = rte_mbuf_data_iova(*pkts);
2891 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2892 txdp->cmd_type_offset_bsz =
2893 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2894 (*pkts)->data_len, 0);
2898 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2901 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2902 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2903 const int N_PER_LOOP = 4;
2904 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2905 int mainpart, leftover;
2909 * Process most of the packets in chunks of N pkts. Any
2910 * leftover packets will get processed one at a time.
2912 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2913 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
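	/* For example, nb_pkts = 29 gives mainpart = 28 (seven groups of four)
	 * and leftover = 1.
	 */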
2914 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2915 /* Copy N mbuf pointers to the S/W ring */
2916 for (j = 0; j < N_PER_LOOP; ++j)
2917 (txep + i + j)->mbuf = *(pkts + i + j);
2918 tx4(txdp + i, pkts + i);
2921 if (unlikely(leftover > 0)) {
2922 for (i = 0; i < leftover; ++i) {
2923 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2924 tx1(txdp + mainpart + i, pkts + mainpart + i);
2929 static inline uint16_t
2930 tx_xmit_pkts(struct ice_tx_queue *txq,
2931 struct rte_mbuf **tx_pkts,
2934 volatile struct ice_tx_desc *txr = txq->tx_ring;
2938 * Begin scanning the H/W ring for done descriptors when the number
2939 * of available descriptors drops below tx_free_thresh. For each done
2940 * descriptor, free the associated buffer.
2942 if (txq->nb_tx_free < txq->tx_free_thresh)
2943 ice_tx_free_bufs(txq);
2946 	/* Use only the descriptors that are available */
2946 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2947 if (unlikely(!nb_pkts))
2950 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2951 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2952 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2953 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2954 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2955 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2957 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2961 /* Fill hardware descriptor ring with mbuf data */
2962 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2963 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2965 	/* Determine if the RS bit needs to be set */
2966 if (txq->tx_tail > txq->tx_next_rs) {
2967 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2968 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2971 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2972 if (txq->tx_next_rs >= txq->nb_tx_desc)
2973 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2976 if (txq->tx_tail >= txq->nb_tx_desc)
2979 /* Update the tx tail register */
2980 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
2986 ice_xmit_pkts_simple(void *tx_queue,
2987 struct rte_mbuf **tx_pkts,
2992 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2993 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2997 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3000 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3001 &tx_pkts[nb_tx], num);
3002 nb_tx = (uint16_t)(nb_tx + ret);
3003 nb_pkts = (uint16_t)(nb_pkts - ret);
3012 ice_set_rx_function(struct rte_eth_dev *dev)
3014 PMD_INIT_FUNC_TRACE();
3015 struct ice_adapter *ad =
3016 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3018 struct ice_rx_queue *rxq;
3020 bool use_avx512 = false;
3021 bool use_avx2 = false;
3023 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3024 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
3025 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3026 ad->rx_vec_allowed = true;
3027 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3028 rxq = dev->data->rx_queues[i];
3029 if (rxq && ice_rxq_vec_setup(rxq)) {
3030 ad->rx_vec_allowed = false;
3035 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3036 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3037 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3038 #ifdef CC_AVX512_SUPPORT
3042 "AVX512 is not supported in build env");
3045 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3046 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3047 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3051 ad->rx_vec_allowed = false;
3055 if (ad->rx_vec_allowed) {
3056 if (dev->data->scattered_rx) {
3058 #ifdef CC_AVX512_SUPPORT
3060 "Using AVX512 Vector Scattered Rx (port %d).",
3061 dev->data->port_id);
3063 ice_recv_scattered_pkts_vec_avx512;
3067 "Using %sVector Scattered Rx (port %d).",
3068 use_avx2 ? "avx2 " : "",
3069 dev->data->port_id);
3070 dev->rx_pkt_burst = use_avx2 ?
3071 ice_recv_scattered_pkts_vec_avx2 :
3072 ice_recv_scattered_pkts_vec;
3076 #ifdef CC_AVX512_SUPPORT
3078 "Using AVX512 Vector Rx (port %d).",
3079 dev->data->port_id);
3081 ice_recv_pkts_vec_avx512;
3085 "Using %sVector Rx (port %d).",
3086 use_avx2 ? "avx2 " : "",
3087 dev->data->port_id);
3088 dev->rx_pkt_burst = use_avx2 ?
3089 ice_recv_pkts_vec_avx2 :
3098 if (dev->data->scattered_rx) {
3099 		/* Set the non-LRO scattered Rx function */
3101 "Using a Scattered function on port %d.",
3102 dev->data->port_id);
3103 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3104 } else if (ad->rx_bulk_alloc_allowed) {
3106 "Rx Burst Bulk Alloc Preconditions are "
3107 "satisfied. Rx Burst Bulk Alloc function "
3108 "will be used on port %d.",
3109 dev->data->port_id);
3110 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3113 "Rx Burst Bulk Alloc Preconditions are not "
3114 "satisfied, Normal Rx will be used on port %d.",
3115 dev->data->port_id);
3116 dev->rx_pkt_burst = ice_recv_pkts;
3120 static const struct {
3121 eth_rx_burst_t pkt_burst;
3123 } ice_rx_burst_infos[] = {
3124 { ice_recv_scattered_pkts, "Scalar Scattered" },
3125 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3126 { ice_recv_pkts, "Scalar" },
3128 #ifdef CC_AVX512_SUPPORT
3129 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3130 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3132 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3133 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3134 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3135 { ice_recv_pkts_vec, "Vector SSE" },
3140 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3141 struct rte_eth_burst_mode *mode)
3143 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3147 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3148 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3149 snprintf(mode->info, sizeof(mode->info), "%s",
3150 ice_rx_burst_infos[i].info);
3160 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3162 struct ice_adapter *ad =
3163 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3165 /* Use a simple Tx queue if possible (only fast free is allowed) */
3166 ad->tx_simple_allowed =
3168 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3169 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3171 if (ad->tx_simple_allowed)
3172 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3176 "Simple Tx can NOT be enabled on Tx queue %u.",
3180 /*********************************************************************
3184 **********************************************************************/
3185 /* The default values of TSO MSS */
3186 #define ICE_MIN_TSO_MSS 64
3187 #define ICE_MAX_TSO_MSS 9728
3188 #define ICE_MAX_TSO_FRAME_SIZE 262144
3190 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3197 for (i = 0; i < nb_pkts; i++) {
3199 ol_flags = m->ol_flags;
3201 if (ol_flags & PKT_TX_TCP_SEG &&
3202 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3203 m->tso_segsz > ICE_MAX_TSO_MSS ||
3204 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3206 			 * An MSS outside this range is considered malicious
3212 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3213 ret = rte_validate_tx_offload(m);
3219 ret = rte_net_intel_cksum_prepare(m);
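		/* rte_net_intel_cksum_prepare() fills in the pseudo-header
		 * checksums that the hardware checksum/TSO offloads rely on.
		 */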
3229 ice_set_tx_function(struct rte_eth_dev *dev)
3231 struct ice_adapter *ad =
3232 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3234 struct ice_tx_queue *txq;
3236 bool use_avx512 = false;
3237 bool use_avx2 = false;
3239 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3240 if (!ice_tx_vec_dev_check(dev) &&
3241 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3242 ad->tx_vec_allowed = true;
3243 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3244 txq = dev->data->tx_queues[i];
3245 if (txq && ice_txq_vec_setup(txq)) {
3246 ad->tx_vec_allowed = false;
3251 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3252 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3253 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3254 #ifdef CC_AVX512_SUPPORT
3258 "AVX512 is not supported in build env");
3261 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3262 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3263 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3267 ad->tx_vec_allowed = false;
3271 if (ad->tx_vec_allowed) {
3273 #ifdef CC_AVX512_SUPPORT
3274 PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
3275 dev->data->port_id);
3276 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3279 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3280 use_avx2 ? "avx2 " : "",
3281 dev->data->port_id);
3282 dev->tx_pkt_burst = use_avx2 ?
3283 ice_xmit_pkts_vec_avx2 :
3286 dev->tx_pkt_prepare = NULL;
3292 if (ad->tx_simple_allowed) {
3293 		PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3294 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3295 dev->tx_pkt_prepare = NULL;
3297 		PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3298 dev->tx_pkt_burst = ice_xmit_pkts;
3299 dev->tx_pkt_prepare = ice_prep_pkts;
3303 static const struct {
3304 eth_tx_burst_t pkt_burst;
3306 } ice_tx_burst_infos[] = {
3307 { ice_xmit_pkts_simple, "Scalar Simple" },
3308 { ice_xmit_pkts, "Scalar" },
3310 #ifdef CC_AVX512_SUPPORT
3311 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3313 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3314 { ice_xmit_pkts_vec, "Vector SSE" },
3319 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3320 struct rte_eth_burst_mode *mode)
3322 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3326 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3327 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3328 snprintf(mode->info, sizeof(mode->info), "%s",
3329 ice_tx_burst_infos[i].info);
3338 /* The meaning of each ptype value is described in the hardware datasheet.
3340  * @note: update ice_dev_supported_ptypes_get() if anything changes here.
3342 static inline uint32_t
3343 ice_get_default_pkt_type(uint16_t ptype)
3345 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3346 __rte_cache_aligned = {
3349 [1] = RTE_PTYPE_L2_ETHER,
3350 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3351 /* [3] - [5] reserved */
3352 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3353 /* [7] - [10] reserved */
3354 [11] = RTE_PTYPE_L2_ETHER_ARP,
3355 /* [12] - [21] reserved */
3357 /* Non tunneled IPv4 */
3358 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3360 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3361 RTE_PTYPE_L4_NONFRAG,
3362 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3365 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3367 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3369 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3373 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3374 RTE_PTYPE_TUNNEL_IP |
3375 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3376 RTE_PTYPE_INNER_L4_FRAG,
3377 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3378 RTE_PTYPE_TUNNEL_IP |
3379 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3380 RTE_PTYPE_INNER_L4_NONFRAG,
3381 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3382 RTE_PTYPE_TUNNEL_IP |
3383 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3384 RTE_PTYPE_INNER_L4_UDP,
3386 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3387 RTE_PTYPE_TUNNEL_IP |
3388 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3389 RTE_PTYPE_INNER_L4_TCP,
3390 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3391 RTE_PTYPE_TUNNEL_IP |
3392 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3393 RTE_PTYPE_INNER_L4_SCTP,
3394 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3395 RTE_PTYPE_TUNNEL_IP |
3396 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3397 RTE_PTYPE_INNER_L4_ICMP,
3400 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3401 RTE_PTYPE_TUNNEL_IP |
3402 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3403 RTE_PTYPE_INNER_L4_FRAG,
3404 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3405 RTE_PTYPE_TUNNEL_IP |
3406 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3407 RTE_PTYPE_INNER_L4_NONFRAG,
3408 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3409 RTE_PTYPE_TUNNEL_IP |
3410 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3411 RTE_PTYPE_INNER_L4_UDP,
3413 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3414 RTE_PTYPE_TUNNEL_IP |
3415 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3416 RTE_PTYPE_INNER_L4_TCP,
3417 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3418 RTE_PTYPE_TUNNEL_IP |
3419 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3420 RTE_PTYPE_INNER_L4_SCTP,
3421 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3422 RTE_PTYPE_TUNNEL_IP |
3423 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3424 RTE_PTYPE_INNER_L4_ICMP,
3426 /* IPv4 --> GRE/Teredo/VXLAN */
3427 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3428 RTE_PTYPE_TUNNEL_GRENAT,
3430 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3431 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3432 RTE_PTYPE_TUNNEL_GRENAT |
3433 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3434 RTE_PTYPE_INNER_L4_FRAG,
3435 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3436 RTE_PTYPE_TUNNEL_GRENAT |
3437 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3438 RTE_PTYPE_INNER_L4_NONFRAG,
3439 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3440 RTE_PTYPE_TUNNEL_GRENAT |
3441 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3442 RTE_PTYPE_INNER_L4_UDP,
3444 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3445 RTE_PTYPE_TUNNEL_GRENAT |
3446 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3447 RTE_PTYPE_INNER_L4_TCP,
3448 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3449 RTE_PTYPE_TUNNEL_GRENAT |
3450 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3451 RTE_PTYPE_INNER_L4_SCTP,
3452 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3453 RTE_PTYPE_TUNNEL_GRENAT |
3454 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3455 RTE_PTYPE_INNER_L4_ICMP,
3457 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3458 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3459 RTE_PTYPE_TUNNEL_GRENAT |
3460 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3461 RTE_PTYPE_INNER_L4_FRAG,
3462 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3463 RTE_PTYPE_TUNNEL_GRENAT |
3464 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3465 RTE_PTYPE_INNER_L4_NONFRAG,
3466 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3467 RTE_PTYPE_TUNNEL_GRENAT |
3468 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3469 RTE_PTYPE_INNER_L4_UDP,
3471 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3472 RTE_PTYPE_TUNNEL_GRENAT |
3473 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3474 RTE_PTYPE_INNER_L4_TCP,
3475 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3476 RTE_PTYPE_TUNNEL_GRENAT |
3477 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3478 RTE_PTYPE_INNER_L4_SCTP,
3479 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3480 RTE_PTYPE_TUNNEL_GRENAT |
3481 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3482 RTE_PTYPE_INNER_L4_ICMP,
3484 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3485 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3486 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3488 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3489 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3490 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3491 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3492 RTE_PTYPE_INNER_L4_FRAG,
3493 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3494 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3495 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3496 RTE_PTYPE_INNER_L4_NONFRAG,
3497 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3498 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3499 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3500 RTE_PTYPE_INNER_L4_UDP,
3502 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3503 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3504 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3505 RTE_PTYPE_INNER_L4_TCP,
3506 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3507 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3508 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3509 RTE_PTYPE_INNER_L4_SCTP,
3510 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3511 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3512 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3513 RTE_PTYPE_INNER_L4_ICMP,
3515 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3516 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3517 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3518 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3519 RTE_PTYPE_INNER_L4_FRAG,
3520 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3521 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3522 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3523 RTE_PTYPE_INNER_L4_NONFRAG,
3524 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3525 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3526 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3527 RTE_PTYPE_INNER_L4_UDP,
3529 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3530 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3531 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3532 RTE_PTYPE_INNER_L4_TCP,
3533 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3534 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3535 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3536 RTE_PTYPE_INNER_L4_SCTP,
3537 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3538 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3539 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3540 RTE_PTYPE_INNER_L4_ICMP,
3541 /* [73] - [87] reserved */
3543 /* Non tunneled IPv6 */
3544 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3546 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3547 RTE_PTYPE_L4_NONFRAG,
3548 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3551 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3553 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3555 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3559 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3560 RTE_PTYPE_TUNNEL_IP |
3561 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3562 RTE_PTYPE_INNER_L4_FRAG,
3563 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3564 RTE_PTYPE_TUNNEL_IP |
3565 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3566 RTE_PTYPE_INNER_L4_NONFRAG,
3567 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3568 RTE_PTYPE_TUNNEL_IP |
3569 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3570 RTE_PTYPE_INNER_L4_UDP,
3572 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3573 RTE_PTYPE_TUNNEL_IP |
3574 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3575 RTE_PTYPE_INNER_L4_TCP,
3576 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3577 RTE_PTYPE_TUNNEL_IP |
3578 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3579 RTE_PTYPE_INNER_L4_SCTP,
3580 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3581 RTE_PTYPE_TUNNEL_IP |
3582 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3583 RTE_PTYPE_INNER_L4_ICMP,
3586 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3587 RTE_PTYPE_TUNNEL_IP |
3588 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3589 RTE_PTYPE_INNER_L4_FRAG,
3590 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3591 RTE_PTYPE_TUNNEL_IP |
3592 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3593 RTE_PTYPE_INNER_L4_NONFRAG,
3594 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3595 RTE_PTYPE_TUNNEL_IP |
3596 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3597 RTE_PTYPE_INNER_L4_UDP,
3598 /* [105] reserved */
3599 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3600 RTE_PTYPE_TUNNEL_IP |
3601 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3602 RTE_PTYPE_INNER_L4_TCP,
3603 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3604 RTE_PTYPE_TUNNEL_IP |
3605 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3606 RTE_PTYPE_INNER_L4_SCTP,
3607 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3608 RTE_PTYPE_TUNNEL_IP |
3609 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3610 RTE_PTYPE_INNER_L4_ICMP,
3612 /* IPv6 --> GRE/Teredo/VXLAN */
3613 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3614 RTE_PTYPE_TUNNEL_GRENAT,
3616 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3617 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3618 RTE_PTYPE_TUNNEL_GRENAT |
3619 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3620 RTE_PTYPE_INNER_L4_FRAG,
3621 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3622 RTE_PTYPE_TUNNEL_GRENAT |
3623 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3624 RTE_PTYPE_INNER_L4_NONFRAG,
3625 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3626 RTE_PTYPE_TUNNEL_GRENAT |
3627 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3628 RTE_PTYPE_INNER_L4_UDP,
3629 /* [113] reserved */
3630 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3631 RTE_PTYPE_TUNNEL_GRENAT |
3632 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3633 RTE_PTYPE_INNER_L4_TCP,
3634 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3635 RTE_PTYPE_TUNNEL_GRENAT |
3636 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3637 RTE_PTYPE_INNER_L4_SCTP,
3638 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3639 RTE_PTYPE_TUNNEL_GRENAT |
3640 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3641 RTE_PTYPE_INNER_L4_ICMP,
3643 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3644 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3645 RTE_PTYPE_TUNNEL_GRENAT |
3646 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3647 RTE_PTYPE_INNER_L4_FRAG,
3648 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3649 RTE_PTYPE_TUNNEL_GRENAT |
3650 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3651 RTE_PTYPE_INNER_L4_NONFRAG,
3652 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3653 RTE_PTYPE_TUNNEL_GRENAT |
3654 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3655 RTE_PTYPE_INNER_L4_UDP,
3656 /* [120] reserved */
3657 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3658 RTE_PTYPE_TUNNEL_GRENAT |
3659 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3660 RTE_PTYPE_INNER_L4_TCP,
3661 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3662 RTE_PTYPE_TUNNEL_GRENAT |
3663 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3664 RTE_PTYPE_INNER_L4_SCTP,
3665 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3666 RTE_PTYPE_TUNNEL_GRENAT |
3667 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3668 RTE_PTYPE_INNER_L4_ICMP,
3670 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3671 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3672 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3674 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3675 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3676 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3677 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3678 RTE_PTYPE_INNER_L4_FRAG,
3679 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3680 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3681 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3682 RTE_PTYPE_INNER_L4_NONFRAG,
3683 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3684 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3685 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3686 RTE_PTYPE_INNER_L4_UDP,
3687 /* [128] reserved */
3688 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3689 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3690 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3691 RTE_PTYPE_INNER_L4_TCP,
3692 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3693 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3694 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3695 RTE_PTYPE_INNER_L4_SCTP,
3696 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3697 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3698 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3699 RTE_PTYPE_INNER_L4_ICMP,
3701 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3702 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3703 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3704 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3705 RTE_PTYPE_INNER_L4_FRAG,
3706 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3707 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3708 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3709 RTE_PTYPE_INNER_L4_NONFRAG,
3710 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3711 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3712 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3713 RTE_PTYPE_INNER_L4_UDP,
3714 /* [135] reserved */
3715 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3716 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3717 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3718 RTE_PTYPE_INNER_L4_TCP,
3719 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3720 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3721 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3722 RTE_PTYPE_INNER_L4_SCTP,
3723 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3724 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3725 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3726 RTE_PTYPE_INNER_L4_ICMP,
3727 /* [139] - [299] reserved */
3730 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3731 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3733 /* PPPoE --> IPv4 */
3734 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3735 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3737 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3738 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3739 RTE_PTYPE_L4_NONFRAG,
3740 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3741 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3743 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3744 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3746 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3747 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3749 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3750 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3753 /* PPPoE --> IPv6 */
3754 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3755 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3757 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3758 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3759 RTE_PTYPE_L4_NONFRAG,
3760 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3761 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3763 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3764 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3766 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3767 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3769 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3770 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3772 /* [314] - [324] reserved */
3774 /* IPv4/IPv6 --> GTPC/GTPU */
3775 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3776 RTE_PTYPE_TUNNEL_GTPC,
3777 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3778 RTE_PTYPE_TUNNEL_GTPC,
3779 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3780 RTE_PTYPE_TUNNEL_GTPC,
3781 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3782 RTE_PTYPE_TUNNEL_GTPC,
3783 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3784 RTE_PTYPE_TUNNEL_GTPU,
3785 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3786 RTE_PTYPE_TUNNEL_GTPU,
3788 /* IPv4 --> GTPU --> IPv4 */
3789 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3790 RTE_PTYPE_TUNNEL_GTPU |
3791 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3792 RTE_PTYPE_INNER_L4_FRAG,
3793 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3794 RTE_PTYPE_TUNNEL_GTPU |
3795 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3796 RTE_PTYPE_INNER_L4_NONFRAG,
3797 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3798 RTE_PTYPE_TUNNEL_GTPU |
3799 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3800 RTE_PTYPE_INNER_L4_UDP,
3801 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3802 RTE_PTYPE_TUNNEL_GTPU |
3803 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3804 RTE_PTYPE_INNER_L4_TCP,
3805 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3806 RTE_PTYPE_TUNNEL_GTPU |
3807 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3808 RTE_PTYPE_INNER_L4_ICMP,
3810 /* IPv6 --> GTPU --> IPv4 */
3811 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3812 RTE_PTYPE_TUNNEL_GTPU |
3813 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3814 RTE_PTYPE_INNER_L4_FRAG,
3815 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3816 RTE_PTYPE_TUNNEL_GTPU |
3817 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3818 RTE_PTYPE_INNER_L4_NONFRAG,
3819 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3820 RTE_PTYPE_TUNNEL_GTPU |
3821 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3822 RTE_PTYPE_INNER_L4_UDP,
3823 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3824 RTE_PTYPE_TUNNEL_GTPU |
3825 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3826 RTE_PTYPE_INNER_L4_TCP,
3827 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3828 RTE_PTYPE_TUNNEL_GTPU |
3829 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3830 RTE_PTYPE_INNER_L4_ICMP,
3832 /* IPv4 --> GTPU --> IPv6 */
3833 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3834 RTE_PTYPE_TUNNEL_GTPU |
3835 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3836 RTE_PTYPE_INNER_L4_FRAG,
3837 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3838 RTE_PTYPE_TUNNEL_GTPU |
3839 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3840 RTE_PTYPE_INNER_L4_NONFRAG,
3841 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3842 RTE_PTYPE_TUNNEL_GTPU |
3843 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3844 RTE_PTYPE_INNER_L4_UDP,
3845 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3846 RTE_PTYPE_TUNNEL_GTPU |
3847 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3848 RTE_PTYPE_INNER_L4_TCP,
3849 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3850 RTE_PTYPE_TUNNEL_GTPU |
3851 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3852 RTE_PTYPE_INNER_L4_ICMP,
3854 /* IPv6 --> GTPU --> IPv6 */
3855 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3856 RTE_PTYPE_TUNNEL_GTPU |
3857 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3858 RTE_PTYPE_INNER_L4_FRAG,
3859 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3860 RTE_PTYPE_TUNNEL_GTPU |
3861 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3862 RTE_PTYPE_INNER_L4_NONFRAG,
3863 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3864 RTE_PTYPE_TUNNEL_GTPU |
3865 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3866 RTE_PTYPE_INNER_L4_UDP,
3867 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3868 RTE_PTYPE_TUNNEL_GTPU |
3869 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3870 RTE_PTYPE_INNER_L4_TCP,
3871 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3872 RTE_PTYPE_TUNNEL_GTPU |
3873 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3874 RTE_PTYPE_INNER_L4_ICMP,
3875 /* All others reserved */
3878 return type_table[ptype];
3882 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3884 struct ice_adapter *ad =
3885 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3888 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3889 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
3892 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
3893 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
3894 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3895 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3896 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3898 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
3899 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
3900 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3901 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
3902 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
3903 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
3906  * Check the programming status descriptor in the FDIR Rx queue.
3907  * This is done after a Flow Director filter has been programmed on the Tx queue.
3911 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3913 volatile union ice_32byte_rx_desc *rxdp;
3920 rxdp = (volatile union ice_32byte_rx_desc *)
3921 (&rxq->rx_ring[rxq->rx_tail]);
3922 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
3923 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
3924 >> ICE_RXD_QW1_STATUS_S;
3926 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
3928 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
3929 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
3930 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
3931 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
3933 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
3934 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
3935 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
3936 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
3940 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
3941 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
3943 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
3947 rxdp->wb.qword1.status_error_len = 0;
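	/* The write-back status is cleared above so the descriptor can be
	 * reused; the FDIR Rx tail is then advanced (with wrap-around) and
	 * written to the tail register below.
	 */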
3949 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
3951 if (rxq->rx_tail == 0)
3952 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3954 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
3960 #define ICE_FDIR_MAX_WAIT_US 10000
3963 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3965 struct ice_tx_queue *txq = pf->fdir.txq;
3966 struct ice_rx_queue *rxq = pf->fdir.rxq;
3967 volatile struct ice_fltr_desc *fdirdp;
3968 volatile struct ice_tx_desc *txdp;
3972 fdirdp = (volatile struct ice_fltr_desc *)
3973 (&txq->tx_ring[txq->tx_tail]);
3974 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3975 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3977 txdp = &txq->tx_ring[txq->tx_tail + 1];
3978 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3979 td_cmd = ICE_TX_DESC_CMD_EOP |
3980 ICE_TX_DESC_CMD_RS |
3981 ICE_TX_DESC_CMD_DUMMY;
3983 txdp->cmd_type_offset_bsz =
3984 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
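	/* FDIR programming consumes two Tx descriptors: the filter descriptor
	 * filled in above and this data descriptor pointing at the pre-built
	 * dummy packet at pf->fdir.dma_addr.
	 */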
3987 if (txq->tx_tail >= txq->nb_tx_desc)
3989 /* Update the tx tail register */
3990 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
3991 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3992 if ((txdp->cmd_type_offset_bsz &
3993 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3994 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3998 if (i >= ICE_FDIR_MAX_WAIT_US) {
4000 "Failed to program FDIR filter: time out to get DD on tx queue.");
4004 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4007 ret = ice_check_fdir_programming_status(rxq);
4015 "Failed to program FDIR filter: programming status reported.");