1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <ethdev_driver.h>
9 #include "rte_pmd_ice.h"
11 #include "ice_rxtx_vec_common.h"
13 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
17 PKT_TX_OUTER_IP_CKSUM)
19 /* Offset of mbuf dynamic field for protocol extraction data */
20 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
22 /* Mask of mbuf dynamic flags for protocol extraction type */
23 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
28 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
31 ice_monitor_callback(const uint64_t value,
32 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
34 const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
36 * we expect the DD bit to be set to 1 if this descriptor was already
39 return (value & m) == m ? -1 : 0;
43 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
45 volatile union ice_rx_flex_desc *rxdp;
46 struct ice_rx_queue *rxq = rx_queue;
50 rxdp = &rxq->rx_ring[desc];
51 /* watch for changes in status bit */
52 pmc->addr = &rxdp->wb.status_error0;
54 /* comparison callback */
55 pmc->fn = ice_monitor_callback;
57 /* register is 16-bit */
58 pmc->size = sizeof(uint16_t);
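/* Map a protocol extraction type to the flexible descriptor RXDID that is
 * programmed into hardware; unknown types fall back to the OVS comms format.
 */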
65 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
67 static uint8_t rxdid_map[] = {
68 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
69 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
70 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
71 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
72 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
73 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
74 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
77 return xtr_type < RTE_DIM(rxdid_map) ?
78 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
82 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
84 volatile union ice_rx_flex_desc *rxdp)
86 volatile struct ice_32b_rx_flex_desc_comms *desc =
87 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
88 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
90 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
91 mb->ol_flags |= PKT_RX_RSS_HASH;
92 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
95 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
96 if (desc->flow_id != 0xFFFFFFFF) {
97 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
98 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
104 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
106 volatile union ice_rx_flex_desc *rxdp)
108 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
109 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
110 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
114 if (desc->flow_id != 0xFFFFFFFF) {
115 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
116 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
119 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
120 stat_err = rte_le_to_cpu_16(desc->status_error0);
121 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
122 mb->ol_flags |= PKT_RX_RSS_HASH;
123 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
129 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
131 volatile union ice_rx_flex_desc *rxdp)
133 volatile struct ice_32b_rx_flex_desc_comms *desc =
134 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
137 stat_err = rte_le_to_cpu_16(desc->status_error0);
138 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
139 mb->ol_flags |= PKT_RX_RSS_HASH;
140 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
143 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
144 if (desc->flow_id != 0xFFFFFFFF) {
145 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
146 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
149 if (rxq->xtr_ol_flag) {
150 uint32_t metadata = 0;
152 stat_err = rte_le_to_cpu_16(desc->status_error1);
154 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
155 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
157 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
159 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
162 mb->ol_flags |= rxq->xtr_ol_flag;
164 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
171 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
173 volatile union ice_rx_flex_desc *rxdp)
175 volatile struct ice_32b_rx_flex_desc_comms *desc =
176 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
179 stat_err = rte_le_to_cpu_16(desc->status_error0);
180 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
181 mb->ol_flags |= PKT_RX_RSS_HASH;
182 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
185 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
186 if (desc->flow_id != 0xFFFFFFFF) {
187 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
188 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
191 if (rxq->xtr_ol_flag) {
192 uint32_t metadata = 0;
194 if (desc->flex_ts.flex.aux0 != 0xFFFF)
195 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
196 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
197 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
200 mb->ol_flags |= rxq->xtr_ol_flag;
202 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
209 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
212 case ICE_RXDID_COMMS_AUX_VLAN:
213 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
214 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
217 case ICE_RXDID_COMMS_AUX_IPV4:
218 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
219 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
222 case ICE_RXDID_COMMS_AUX_IPV6:
223 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
224 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
227 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
228 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
229 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
232 case ICE_RXDID_COMMS_AUX_TCP:
233 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
234 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
237 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
238 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
239 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
242 case ICE_RXDID_COMMS_GENERIC:
243 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
246 case ICE_RXDID_COMMS_OVS:
247 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
251 /* update this according to the RXDID for PROTO_XTR_NONE */
252 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
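/* If the application never registered the protocol extraction metadata
 * dynfield, there is nowhere to store the data, so clear the ol_flag.
 */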
256 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
257 rxq->xtr_ol_flag = 0;
260 static enum ice_status
261 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
263 struct ice_vsi *vsi = rxq->vsi;
264 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
265 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
266 struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
267 struct ice_rlan_ctx rx_ctx;
270 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
272 struct ice_adapter *ad = rxq->vsi->adapter;
273 uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
275 /* Set buffer size; header split is disabled. */
276 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
277 RTE_PKTMBUF_HEADROOM);
279 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
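/* Cap the maximum packet length by what ICE_SUPPORT_CHAIN_NUM chained
 * Rx buffers can hold.
 */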
281 RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
284 if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
285 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
286 PMD_DRV_LOG(ERR, "maximum packet length must "
287 "be larger than %u and smaller than %u",
288 (uint32_t)RTE_ETHER_MIN_LEN,
289 (uint32_t)ICE_FRAME_SIZE_MAX);
293 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
294 /* Register mbuf field and flag for Rx timestamp */
295 err = rte_mbuf_dyn_rx_timestamp_register(
296 &ice_timestamp_dynfield_offset,
297 &ice_timestamp_dynflag);
300 "Cannot register mbuf field/flag for timestamp");
305 memset(&rx_ctx, 0, sizeof(rx_ctx));
307 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
308 rx_ctx.qlen = rxq->nb_rx_desc;
309 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
310 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
311 rx_ctx.dtype = 0; /* No Header Split mode */
312 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
313 rx_ctx.dsize = 1; /* 32B descriptors */
315 rx_ctx.rxmax = rxq->max_pkt_len;
316 /* TPH: Transaction Layer Packet (TLP) processing hints */
317 rx_ctx.tphrdesc_ena = 1;
318 rx_ctx.tphwdesc_ena = 1;
319 rx_ctx.tphdata_ena = 1;
320 rx_ctx.tphhead_ena = 1;
321 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
322 * When the number of free descriptors goes below the lrxqthresh,
323 * an immediate interrupt is triggered.
325 rx_ctx.lrxqthresh = 2;
326 /* By default use 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
329 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
331 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
333 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
334 rxq->port_id, rxq->queue_id, rxdid);
336 if (!(pf->supported_rxdid & BIT(rxdid))) {
337 PMD_DRV_LOG(ERR, "the loaded package doesn't support RXDID (%u)",
342 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
344 /* Enable Flexible Descriptors in the queue context which
345 * allows this driver to select a specific receive descriptor format
347 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
348 QRXFLXP_CNTXT_RXDID_IDX_M;
350 /* Increase context priority to pick up profile ID;
351 * default is 0x01; setting to 0x03 ensures the profile
352 * is programmed even if the previous context has the same priority
354 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
355 QRXFLXP_CNTXT_RXDID_PRIO_M;
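/* Enable descriptor timestamps when PTP or the Rx timestamp offload is on */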
357 if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
358 regval |= QRXFLXP_CNTXT_TS_M;
360 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
362 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
364 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
368 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
370 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
375 /* Check if scattered RX needs to be used. */
376 if (frame_size > buf_size)
377 dev_data->scattered_rx = 1;
379 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
381 /* Init the Rx tail register */
382 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
387 /* Allocate mbufs for all descriptors in rx queue */
389 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
391 struct ice_rx_entry *rxe = rxq->sw_ring;
395 for (i = 0; i < rxq->nb_rx_desc; i++) {
396 volatile union ice_rx_flex_desc *rxd;
397 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
399 if (unlikely(!mbuf)) {
400 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
404 rte_mbuf_refcnt_set(mbuf, 1);
406 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
408 mbuf->port = rxq->port_id;
411 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
413 rxd = &rxq->rx_ring[i];
414 rxd->read.pkt_addr = dma_addr;
415 rxd->read.hdr_addr = 0;
416 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
426 /* Free all mbufs for descriptors in rx queue */
428 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
432 if (!rxq || !rxq->sw_ring) {
433 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
437 for (i = 0; i < rxq->nb_rx_desc; i++) {
438 if (rxq->sw_ring[i].mbuf) {
439 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
440 rxq->sw_ring[i].mbuf = NULL;
443 if (rxq->rx_nb_avail == 0)
445 for (i = 0; i < rxq->rx_nb_avail; i++)
446 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
448 rxq->rx_nb_avail = 0;
451 /* Turn an Rx queue on or off
452 * @q_idx: queue index in pf scope
453 * @on: turn on or off the queue
456 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
461 /* QRX_CTRL = QRX_ENA */
462 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
465 if (reg & QRX_CTRL_QENA_STAT_M)
466 return 0; /* Already on, skip */
467 reg |= QRX_CTRL_QENA_REQ_M;
469 if (!(reg & QRX_CTRL_QENA_STAT_M))
470 return 0; /* Already off, skip */
471 reg &= ~QRX_CTRL_QENA_REQ_M;
474 /* Write the register */
475 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
476 /* Check the result. It is said that QENA_STAT
477 * follows QENA_REQ by no more than 10 us.
478 * TODO: need to change the wait counter later
480 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
481 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
482 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
484 if ((reg & QRX_CTRL_QENA_REQ_M) &&
485 (reg & QRX_CTRL_QENA_STAT_M))
488 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
489 !(reg & QRX_CTRL_QENA_STAT_M))
494 /* Check whether the wait timed out */
495 if (j >= ICE_CHK_Q_ENA_COUNT) {
496 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
497 (on ? "enable" : "disable"), q_idx);
505 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
509 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
510 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
511 "rxq->rx_free_thresh=%d, "
512 "ICE_RX_MAX_BURST=%d",
513 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
515 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
516 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
517 "rxq->rx_free_thresh=%d, "
518 "rxq->nb_rx_desc=%d",
519 rxq->rx_free_thresh, rxq->nb_rx_desc);
521 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
522 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
523 "rxq->nb_rx_desc=%d, "
524 "rxq->rx_free_thresh=%d",
525 rxq->nb_rx_desc, rxq->rx_free_thresh);
532 /* reset fields in ice_rx_queue back to default */
534 ice_reset_rx_queue(struct ice_rx_queue *rxq)
540 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
544 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
546 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
547 ((volatile char *)rxq->rx_ring)[i] = 0;
549 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
550 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
551 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
553 rxq->rx_nb_avail = 0;
554 rxq->rx_next_avail = 0;
555 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
559 rxq->pkt_first_seg = NULL;
560 rxq->pkt_last_seg = NULL;
562 rxq->rxrearm_start = 0;
567 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
569 struct ice_rx_queue *rxq;
571 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
573 PMD_INIT_FUNC_TRACE();
575 if (rx_queue_id >= dev->data->nb_rx_queues) {
576 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
577 rx_queue_id, dev->data->nb_rx_queues);
581 rxq = dev->data->rx_queues[rx_queue_id];
582 if (!rxq || !rxq->q_set) {
583 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
588 err = ice_program_hw_rx_queue(rxq);
590 PMD_DRV_LOG(ERR, "failed to program RX queue %u",
595 err = ice_alloc_rx_queue_mbufs(rxq);
597 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
601 /* Init the RX tail register. */
602 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
604 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
606 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
609 rxq->rx_rel_mbufs(rxq);
610 ice_reset_rx_queue(rxq);
614 dev->data->rx_queue_state[rx_queue_id] =
615 RTE_ETH_QUEUE_STATE_STARTED;
621 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
623 struct ice_rx_queue *rxq;
625 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
627 if (rx_queue_id < dev->data->nb_rx_queues) {
628 rxq = dev->data->rx_queues[rx_queue_id];
630 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
632 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
636 rxq->rx_rel_mbufs(rxq);
637 ice_reset_rx_queue(rxq);
638 dev->data->rx_queue_state[rx_queue_id] =
639 RTE_ETH_QUEUE_STATE_STOPPED;
646 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
648 struct ice_tx_queue *txq;
652 struct ice_aqc_add_tx_qgrp *txq_elem;
653 struct ice_tlan_ctx tx_ctx;
656 PMD_INIT_FUNC_TRACE();
658 if (tx_queue_id >= dev->data->nb_tx_queues) {
659 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
660 tx_queue_id, dev->data->nb_tx_queues);
664 txq = dev->data->tx_queues[tx_queue_id];
665 if (!txq || !txq->q_set) {
666 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
671 buf_len = ice_struct_size(txq_elem, txqs, 1);
672 txq_elem = ice_malloc(hw, buf_len);
677 hw = ICE_VSI_TO_HW(vsi);
679 memset(&tx_ctx, 0, sizeof(tx_ctx));
680 txq_elem->num_txqs = 1;
681 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
683 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
684 tx_ctx.qlen = txq->nb_tx_desc;
685 tx_ctx.pf_num = hw->pf_id;
686 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
687 tx_ctx.src_vsi = vsi->vsi_id;
688 tx_ctx.port_num = hw->port_info->lport;
689 tx_ctx.tso_ena = 1; /* tso enable */
690 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
691 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
694 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
697 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
699 /* Init the Tx tail register */
700 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
702 /* FIXME: we assume TC is always 0 here */
703 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
704 txq_elem, buf_len, NULL);
706 PMD_DRV_LOG(ERR, "Failed to add lan txq");
710 /* store the schedule node id */
711 txq->q_teid = txq_elem->txqs[0].q_teid;
713 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
719 static enum ice_status
720 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
722 struct ice_vsi *vsi = rxq->vsi;
723 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
724 uint32_t rxdid = ICE_RXDID_LEGACY_1;
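/* The FDIR programming queue uses the legacy-1 descriptor format rather
 * than a flexible RXDID.
 */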
725 struct ice_rlan_ctx rx_ctx;
730 rxq->rx_buf_len = 1024;
732 memset(&rx_ctx, 0, sizeof(rx_ctx));
734 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
735 rx_ctx.qlen = rxq->nb_rx_desc;
736 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
737 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
738 rx_ctx.dtype = 0; /* No Header Split mode */
739 rx_ctx.dsize = 1; /* 32B descriptors */
740 rx_ctx.rxmax = ICE_ETH_MAX_LEN;
741 /* TPH: Transaction Layer Packet (TLP) processing hints */
742 rx_ctx.tphrdesc_ena = 1;
743 rx_ctx.tphwdesc_ena = 1;
744 rx_ctx.tphdata_ena = 1;
745 rx_ctx.tphhead_ena = 1;
746 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
747 * When the number of free descriptors goes below the lrxqthresh,
748 * an immediate interrupt is triggered.
750 rx_ctx.lrxqthresh = 2;
751 /* By default use 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
754 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
756 /* Enable Flexible Descriptors in the queue context which
757 * allows this driver to select a specific receive descriptor format
759 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
760 QRXFLXP_CNTXT_RXDID_IDX_M;
762 /* Increase context priority to pick up profile ID;
763 * default is 0x01; setting to 0x03 ensures the profile
764 * is programmed even if the previous context has the same priority
766 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
767 QRXFLXP_CNTXT_RXDID_PRIO_M;
769 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
771 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
773 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
777 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
779 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
784 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
786 /* Init the Rx tail register */
787 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
793 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
795 struct ice_rx_queue *rxq;
797 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
798 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
800 PMD_INIT_FUNC_TRACE();
803 if (!rxq || !rxq->q_set) {
804 PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available or not set up",
809 err = ice_fdir_program_hw_rx_queue(rxq);
811 PMD_DRV_LOG(ERR, "failed to program FDIR RX queue %u",
816 /* Init the RX tail register. */
817 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
819 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
821 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
824 ice_reset_rx_queue(rxq);
832 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
834 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
835 struct ice_tx_queue *txq;
839 struct ice_aqc_add_tx_qgrp *txq_elem;
840 struct ice_tlan_ctx tx_ctx;
843 PMD_INIT_FUNC_TRACE();
846 if (!txq || !txq->q_set) {
847 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up",
852 buf_len = ice_struct_size(txq_elem, txqs, 1);
853 txq_elem = ice_malloc(hw, buf_len);
858 hw = ICE_VSI_TO_HW(vsi);
860 memset(&tx_ctx, 0, sizeof(tx_ctx));
861 txq_elem->num_txqs = 1;
862 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
864 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
865 tx_ctx.qlen = txq->nb_tx_desc;
866 tx_ctx.pf_num = hw->pf_id;
867 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
868 tx_ctx.src_vsi = vsi->vsi_id;
869 tx_ctx.port_num = hw->port_info->lport;
870 tx_ctx.tso_ena = 1; /* tso enable */
871 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
872 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
874 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
877 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
879 /* Init the Tx tail register */
880 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
882 /* FIXME: we assume TC is always 0 here */
883 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
884 txq_elem, buf_len, NULL);
886 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
890 /* store the schedule node id */
891 txq->q_teid = txq_elem->txqs[0].q_teid;
897 /* Free all mbufs for descriptors in tx queue */
899 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
903 if (!txq || !txq->sw_ring) {
904 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
908 for (i = 0; i < txq->nb_tx_desc; i++) {
909 if (txq->sw_ring[i].mbuf) {
910 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
911 txq->sw_ring[i].mbuf = NULL;
917 ice_reset_tx_queue(struct ice_tx_queue *txq)
919 struct ice_tx_entry *txe;
920 uint16_t i, prev, size;
923 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
928 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
929 for (i = 0; i < size; i++)
930 ((volatile char *)txq->tx_ring)[i] = 0;
932 prev = (uint16_t)(txq->nb_tx_desc - 1);
933 for (i = 0; i < txq->nb_tx_desc; i++) {
934 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
936 txd->cmd_type_offset_bsz =
937 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
940 txe[prev].next_id = i;
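/* The first DD check and the first RS request both land tx_rs_thresh
 * descriptors into the ring.
 */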
944 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
945 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
950 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
951 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
955 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
957 struct ice_tx_queue *txq;
958 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
959 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
960 struct ice_vsi *vsi = pf->main_vsi;
961 enum ice_status status;
964 uint16_t q_handle = tx_queue_id;
966 if (tx_queue_id >= dev->data->nb_tx_queues) {
967 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
968 tx_queue_id, dev->data->nb_tx_queues);
972 txq = dev->data->tx_queues[tx_queue_id];
974 PMD_DRV_LOG(ERR, "TX queue %u is not available",
979 q_ids[0] = txq->reg_idx;
980 q_teids[0] = txq->q_teid;
982 /* FIXME: we assume TC is always 0 here */
983 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
984 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
985 if (status != ICE_SUCCESS) {
986 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
990 txq->tx_rel_mbufs(txq);
991 ice_reset_tx_queue(txq);
992 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
998 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1000 struct ice_rx_queue *rxq;
1002 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1003 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1007 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1009 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1013 rxq->rx_rel_mbufs(rxq);
1019 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1021 struct ice_tx_queue *txq;
1022 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1023 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1024 struct ice_vsi *vsi = pf->main_vsi;
1025 enum ice_status status;
1027 uint32_t q_teids[1];
1028 uint16_t q_handle = tx_queue_id;
1032 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1038 q_ids[0] = txq->reg_idx;
1039 q_teids[0] = txq->q_teid;
1041 /* FIXME: we assume TC is always 0 here */
1042 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1043 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1044 if (status != ICE_SUCCESS) {
1045 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1049 txq->tx_rel_mbufs(txq);
1055 ice_rx_queue_setup(struct rte_eth_dev *dev,
1058 unsigned int socket_id,
1059 const struct rte_eth_rxconf *rx_conf,
1060 struct rte_mempool *mp)
1062 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1063 struct ice_adapter *ad =
1064 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1065 struct ice_vsi *vsi = pf->main_vsi;
1066 struct ice_rx_queue *rxq;
1067 const struct rte_memzone *rz;
1070 int use_def_burst_func = 1;
1073 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1074 nb_desc > ICE_MAX_RING_DESC ||
1075 nb_desc < ICE_MIN_RING_DESC) {
1076 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1077 "invalid", nb_desc);
1081 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1083 /* Free memory if needed */
1084 if (dev->data->rx_queues[queue_idx]) {
1085 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1086 dev->data->rx_queues[queue_idx] = NULL;
1089 /* Allocate the rx queue data structure */
1090 rxq = rte_zmalloc_socket(NULL,
1091 sizeof(struct ice_rx_queue),
1092 RTE_CACHE_LINE_SIZE,
1095 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1096 "rx queue data structure");
1100 rxq->nb_rx_desc = nb_desc;
1101 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1102 rxq->queue_id = queue_idx;
1103 rxq->offloads = offloads;
1105 rxq->reg_idx = vsi->base_queue + queue_idx;
1106 rxq->port_id = dev->data->port_id;
1107 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1108 rxq->crc_len = RTE_ETHER_CRC_LEN;
1112 rxq->drop_en = rx_conf->rx_drop_en;
1114 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1115 rxq->proto_xtr = pf->proto_xtr != NULL ?
1116 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1118 /* Allocate the maximum number of RX ring hardware descriptors. */
1119 len = ICE_MAX_RING_DESC;
1122 * Allocating a little more memory because vectorized/bulk_alloc Rx
1123 * functions don't check boundaries each time.
1125 len += ICE_RX_MAX_BURST;
1127 /* Allocate the maximum number of RX ring hardware descriptors. */
1128 ring_size = sizeof(union ice_rx_flex_desc) * len;
1129 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1130 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1131 ring_size, ICE_RING_BASE_ALIGN,
1134 ice_rx_queue_release(rxq);
1135 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1140 /* Zero all the descriptors in the ring. */
1141 memset(rz->addr, 0, ring_size);
1143 rxq->rx_ring_dma = rz->iova;
1144 rxq->rx_ring = rz->addr;
1146 /* always reserve more for bulk alloc */
1147 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1149 /* Allocate the software ring. */
1150 rxq->sw_ring = rte_zmalloc_socket(NULL,
1151 sizeof(struct ice_rx_entry) * len,
1152 RTE_CACHE_LINE_SIZE,
1154 if (!rxq->sw_ring) {
1155 ice_rx_queue_release(rxq);
1156 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1160 ice_reset_rx_queue(rxq);
1162 dev->data->rx_queues[queue_idx] = rxq;
1163 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1165 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1167 if (!use_def_burst_func) {
1168 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1169 "satisfied. Rx Burst Bulk Alloc function will be "
1170 "used on port=%d, queue=%d.",
1171 rxq->port_id, rxq->queue_id);
1173 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1174 "not satisfied, Scattered Rx is requested. "
1175 "on port=%d, queue=%d.",
1176 rxq->port_id, rxq->queue_id);
1177 ad->rx_bulk_alloc_allowed = false;
1184 ice_rx_queue_release(void *rxq)
1186 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1189 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1194 rte_free(q->sw_ring);
1195 rte_memzone_free(q->mz);
1200 ice_tx_queue_setup(struct rte_eth_dev *dev,
1203 unsigned int socket_id,
1204 const struct rte_eth_txconf *tx_conf)
1206 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1207 struct ice_vsi *vsi = pf->main_vsi;
1208 struct ice_tx_queue *txq;
1209 const struct rte_memzone *tz;
1211 uint16_t tx_rs_thresh, tx_free_thresh;
1214 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1216 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1217 nb_desc > ICE_MAX_RING_DESC ||
1218 nb_desc < ICE_MIN_RING_DESC) {
1219 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1220 "invalid", nb_desc);
1225 * The following two parameters control the setting of the RS bit on
1226 * transmit descriptors. TX descriptors will have their RS bit set
1227 * after txq->tx_rs_thresh descriptors have been used. The TX
1228 * descriptor ring will be cleaned after txq->tx_free_thresh
1229 * descriptors are used or if the number of descriptors required to
1230 * transmit a packet is greater than the number of free TX descriptors.
1232 * The following constraints must be satisfied:
1233 * - tx_rs_thresh must be greater than 0.
1234 * - tx_rs_thresh must be less than the size of the ring minus 2.
1235 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1236 * - tx_rs_thresh must be a divisor of the ring size.
1237 * - tx_free_thresh must be greater than 0.
1238 * - tx_free_thresh must be less than the size of the ring minus 3.
1239 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1241 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1242 * race condition, hence the maximum threshold constraints. When set
1243 * to zero use default values.
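 *
 * For example, with nb_desc = 1024 and tx_rs_thresh = 32, the RS bit is
 * requested on every 32nd descriptor, so completions are reported to the
 * driver in batches of 32 (illustrative numbers, not fixed driver defaults).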
1245 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1246 tx_conf->tx_free_thresh :
1247 ICE_DEFAULT_TX_FREE_THRESH);
1248 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1250 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1251 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1252 if (tx_conf->tx_rs_thresh)
1253 tx_rs_thresh = tx_conf->tx_rs_thresh;
1254 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1255 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1256 "exceed nb_desc. (tx_rs_thresh=%u "
1257 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1258 (unsigned int)tx_rs_thresh,
1259 (unsigned int)tx_free_thresh,
1260 (unsigned int)nb_desc,
1261 (int)dev->data->port_id,
1265 if (tx_rs_thresh >= (nb_desc - 2)) {
1266 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1267 "number of TX descriptors minus 2. "
1268 "(tx_rs_thresh=%u port=%d queue=%d)",
1269 (unsigned int)tx_rs_thresh,
1270 (int)dev->data->port_id,
1274 if (tx_free_thresh >= (nb_desc - 3)) {
1276 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1277 "number of TX descriptors minus 3. "
1278 "(tx_free_thresh=%u port=%d queue=%d)",
1279 (unsigned int)tx_free_thresh,
1280 (int)dev->data->port_id,
1284 if (tx_rs_thresh > tx_free_thresh) {
1285 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1286 "equal to tx_free_thresh. (tx_free_thresh=%u"
1287 " tx_rs_thresh=%u port=%d queue=%d)",
1288 (unsigned int)tx_free_thresh,
1289 (unsigned int)tx_rs_thresh,
1290 (int)dev->data->port_id,
1294 if ((nb_desc % tx_rs_thresh) != 0) {
1295 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1296 "number of TX descriptors. (tx_rs_thresh=%u"
1297 " port=%d queue=%d)",
1298 (unsigned int)tx_rs_thresh,
1299 (int)dev->data->port_id,
1303 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1304 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1305 "tx_rs_thresh is greater than 1. "
1306 "(tx_rs_thresh=%u port=%d queue=%d)",
1307 (unsigned int)tx_rs_thresh,
1308 (int)dev->data->port_id,
1313 /* Free memory if needed. */
1314 if (dev->data->tx_queues[queue_idx]) {
1315 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1316 dev->data->tx_queues[queue_idx] = NULL;
1319 /* Allocate the TX queue data structure. */
1320 txq = rte_zmalloc_socket(NULL,
1321 sizeof(struct ice_tx_queue),
1322 RTE_CACHE_LINE_SIZE,
1325 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1326 "tx queue structure");
1330 /* Allocate TX hardware ring descriptors. */
1331 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1332 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1333 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1334 ring_size, ICE_RING_BASE_ALIGN,
1337 ice_tx_queue_release(txq);
1338 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1343 txq->nb_tx_desc = nb_desc;
1344 txq->tx_rs_thresh = tx_rs_thresh;
1345 txq->tx_free_thresh = tx_free_thresh;
1346 txq->pthresh = tx_conf->tx_thresh.pthresh;
1347 txq->hthresh = tx_conf->tx_thresh.hthresh;
1348 txq->wthresh = tx_conf->tx_thresh.wthresh;
1349 txq->queue_id = queue_idx;
1351 txq->reg_idx = vsi->base_queue + queue_idx;
1352 txq->port_id = dev->data->port_id;
1353 txq->offloads = offloads;
1355 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1357 txq->tx_ring_dma = tz->iova;
1358 txq->tx_ring = tz->addr;
1360 /* Allocate software ring */
1362 rte_zmalloc_socket(NULL,
1363 sizeof(struct ice_tx_entry) * nb_desc,
1364 RTE_CACHE_LINE_SIZE,
1366 if (!txq->sw_ring) {
1367 ice_tx_queue_release(txq);
1368 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1372 ice_reset_tx_queue(txq);
1374 dev->data->tx_queues[queue_idx] = txq;
1375 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1376 ice_set_tx_function_flag(dev, txq);
1382 ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1384 ice_rx_queue_release(dev->data->rx_queues[qid]);
1388 ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1390 ice_tx_queue_release(dev->data->tx_queues[qid]);
1394 ice_tx_queue_release(void *txq)
1396 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1399 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1404 rte_free(q->sw_ring);
1405 rte_memzone_free(q->mz);
1410 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1411 struct rte_eth_rxq_info *qinfo)
1413 struct ice_rx_queue *rxq;
1415 rxq = dev->data->rx_queues[queue_id];
1417 qinfo->mp = rxq->mp;
1418 qinfo->scattered_rx = dev->data->scattered_rx;
1419 qinfo->nb_desc = rxq->nb_rx_desc;
1421 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1422 qinfo->conf.rx_drop_en = rxq->drop_en;
1423 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1427 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1428 struct rte_eth_txq_info *qinfo)
1430 struct ice_tx_queue *txq;
1432 txq = dev->data->tx_queues[queue_id];
1434 qinfo->nb_desc = txq->nb_tx_desc;
1436 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1437 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1438 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1440 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1441 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1442 qinfo->conf.offloads = txq->offloads;
1443 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1447 ice_rx_queue_count(void *rx_queue)
1449 #define ICE_RXQ_SCAN_INTERVAL 4
1450 volatile union ice_rx_flex_desc *rxdp;
1451 struct ice_rx_queue *rxq;
1455 rxdp = &rxq->rx_ring[rxq->rx_tail];
1456 while ((desc < rxq->nb_rx_desc) &&
1457 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1458 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1460 * Check the DD bit of one Rx descriptor in each group of 4,
1461 * to avoid checking too frequently and degrading performance
1464 desc += ICE_RXQ_SCAN_INTERVAL;
1465 rxdp += ICE_RXQ_SCAN_INTERVAL;
1466 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1467 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1468 desc - rxq->nb_rx_desc]);
1474 #define ICE_RX_FLEX_ERR0_BITS \
1475 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1476 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1477 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1478 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1479 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1480 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1482 /* Rx L3/L4 checksum */
1483 static inline uint64_t
1484 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1488 /* check if HW has decoded the packet and validated the checksum */
1489 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1492 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1493 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1497 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1498 flags |= PKT_RX_IP_CKSUM_BAD;
1500 flags |= PKT_RX_IP_CKSUM_GOOD;
1502 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1503 flags |= PKT_RX_L4_CKSUM_BAD;
1505 flags |= PKT_RX_L4_CKSUM_GOOD;
1507 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1508 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1510 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1511 flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1513 flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1519 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1521 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1522 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1523 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1525 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1526 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1527 rte_le_to_cpu_16(rxdp->wb.l2tag1));
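/* If the hardware also stripped a second tag into L2TAG2, report QinQ:
 * the tag already in vlan_tci becomes the outer tag and L2TAG2 supplies
 * the inner tag.
 */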
1532 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1533 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1534 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1535 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1536 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1537 mb->vlan_tci_outer = mb->vlan_tci;
1538 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1539 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1540 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1541 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1543 mb->vlan_tci_outer = 0;
1546 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1547 mb->vlan_tci, mb->vlan_tci_outer);
1550 #define ICE_LOOK_AHEAD 8
1551 #if (ICE_LOOK_AHEAD != 8)
1552 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1555 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1557 volatile union ice_rx_flex_desc *rxdp;
1558 struct ice_rx_entry *rxep;
1559 struct rte_mbuf *mb;
1562 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1563 int32_t i, j, nb_rx = 0;
1564 uint64_t pkt_flags = 0;
1565 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1566 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1567 struct ice_vsi *vsi = rxq->vsi;
1568 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1570 struct ice_adapter *ad = rxq->vsi->adapter;
1572 rxdp = &rxq->rx_ring[rxq->rx_tail];
1573 rxep = &rxq->sw_ring[rxq->rx_tail];
1575 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1577 /* Make sure there is at least 1 packet to receive */
1578 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1582 * Scan LOOK_AHEAD descriptors at a time to determine which
1583 * descriptors reference packets that are ready to be received.
1585 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1586 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1587 /* Read desc statuses backwards to avoid race condition */
1588 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1589 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1593 /* Compute how many status bits were set */
1594 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1595 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
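/* nb_dd counts how many of the ICE_LOOK_AHEAD descriptors are complete;
 * a partial group ends the scan below.
 */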
1599 /* Translate descriptor info to mbuf parameters */
1600 for (j = 0; j < nb_dd; j++) {
1602 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1603 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1604 mb->data_len = pkt_len;
1605 mb->pkt_len = pkt_len;
1607 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1608 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1609 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1610 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1611 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1612 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1613 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1614 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1615 ts_ns = ice_tstamp_convert_32b_64b(hw,
1616 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
1617 if (ice_timestamp_dynflag > 0) {
1618 *RTE_MBUF_DYNFIELD(mb,
1619 ice_timestamp_dynfield_offset,
1620 rte_mbuf_timestamp_t *) = ts_ns;
1621 mb->ol_flags |= ice_timestamp_dynflag;
1625 if (ad->ptp_ena && ((mb->packet_type &
1626 RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1628 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1629 mb->timesync = rxq->queue_id;
1630 pkt_flags |= PKT_RX_IEEE1588_PTP;
1633 mb->ol_flags |= pkt_flags;
1636 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1637 rxq->rx_stage[i + j] = rxep[j].mbuf;
1639 if (nb_dd != ICE_LOOK_AHEAD)
1643 /* Clear software ring entries */
1644 for (i = 0; i < nb_rx; i++)
1645 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1647 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1648 "port_id=%u, queue_id=%u, nb_rx=%d",
1649 rxq->port_id, rxq->queue_id, nb_rx);
1654 static inline uint16_t
1655 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1656 struct rte_mbuf **rx_pkts,
1660 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1662 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1664 for (i = 0; i < nb_pkts; i++)
1665 rx_pkts[i] = stage[i];
1667 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1668 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1674 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1676 volatile union ice_rx_flex_desc *rxdp;
1677 struct ice_rx_entry *rxep;
1678 struct rte_mbuf *mb;
1679 uint16_t alloc_idx, i;
1683 /* Allocate buffers in bulk */
1684 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1685 (rxq->rx_free_thresh - 1));
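/* alloc_idx is the first entry of the rx_free_thresh-sized refill window
 * that ends at rx_free_trigger.
 */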
1686 rxep = &rxq->sw_ring[alloc_idx];
1687 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1688 rxq->rx_free_thresh);
1689 if (unlikely(diag != 0)) {
1690 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1694 rxdp = &rxq->rx_ring[alloc_idx];
1695 for (i = 0; i < rxq->rx_free_thresh; i++) {
1696 if (likely(i < (rxq->rx_free_thresh - 1)))
1697 /* Prefetch next mbuf */
1698 rte_prefetch0(rxep[i + 1].mbuf);
1701 rte_mbuf_refcnt_set(mb, 1);
1703 mb->data_off = RTE_PKTMBUF_HEADROOM;
1705 mb->port = rxq->port_id;
1706 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1707 rxdp[i].read.hdr_addr = 0;
1708 rxdp[i].read.pkt_addr = dma_addr;
1711 /* Update Rx tail register */
1712 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1714 rxq->rx_free_trigger =
1715 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1716 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1717 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1722 static inline uint16_t
1723 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1725 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
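/* Serve packets still staged from a previous ring scan before touching
 * the hardware ring again.
 */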
1731 if (rxq->rx_nb_avail)
1732 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1734 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1735 rxq->rx_next_avail = 0;
1736 rxq->rx_nb_avail = nb_rx;
1737 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1739 if (rxq->rx_tail > rxq->rx_free_trigger) {
1740 if (ice_rx_alloc_bufs(rxq) != 0) {
1743 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1744 rxq->rx_free_thresh;
1745 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1746 "port_id=%u, queue_id=%u",
1747 rxq->port_id, rxq->queue_id);
1748 rxq->rx_nb_avail = 0;
1749 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1750 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1751 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1757 if (rxq->rx_tail >= rxq->nb_rx_desc)
1760 if (rxq->rx_nb_avail)
1761 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1767 ice_recv_pkts_bulk_alloc(void *rx_queue,
1768 struct rte_mbuf **rx_pkts,
1775 if (unlikely(nb_pkts == 0))
1778 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1779 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1782 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1783 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1784 nb_rx = (uint16_t)(nb_rx + count);
1785 nb_pkts = (uint16_t)(nb_pkts - count);
1794 ice_recv_scattered_pkts(void *rx_queue,
1795 struct rte_mbuf **rx_pkts,
1798 struct ice_rx_queue *rxq = rx_queue;
1799 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1800 volatile union ice_rx_flex_desc *rxdp;
1801 union ice_rx_flex_desc rxd;
1802 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1803 struct ice_rx_entry *rxe;
1804 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1805 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1806 struct rte_mbuf *nmb; /* new allocated mbuf */
1807 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1808 uint16_t rx_id = rxq->rx_tail;
1810 uint16_t nb_hold = 0;
1811 uint16_t rx_packet_len;
1812 uint16_t rx_stat_err0;
1815 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1816 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1817 struct ice_vsi *vsi = rxq->vsi;
1818 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1820 struct ice_adapter *ad = rxq->vsi->adapter;
1822 while (nb_rx < nb_pkts) {
1823 rxdp = &rx_ring[rx_id];
1824 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1826 /* Check the DD bit first */
1827 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1831 nmb = rte_mbuf_raw_alloc(rxq->mp);
1832 if (unlikely(!nmb)) {
1833 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
1836 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1839 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1841 if (unlikely(rx_id == rxq->nb_rx_desc))
1844 /* Prefetch next mbuf */
1845 rte_prefetch0(sw_ring[rx_id].mbuf);
1848 * When next RX descriptor is on a cache line boundary,
1849 * prefetch the next 4 RX descriptors and next 8 pointers
1852 if ((rx_id & 0x3) == 0) {
1853 rte_prefetch0(&rx_ring[rx_id]);
1854 rte_prefetch0(&sw_ring[rx_id]);
1860 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1862 /* Set data buffer address and data length of the mbuf */
1863 rxdp->read.hdr_addr = 0;
1864 rxdp->read.pkt_addr = dma_addr;
1865 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1866 ICE_RX_FLX_DESC_PKT_LEN_M;
1867 rxm->data_len = rx_packet_len;
1868 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1871 * If this is the first buffer of the received packet, set the
1872 * pointer to the first mbuf of the packet and initialize its
1873 * context. Otherwise, update the total length and the number
1874 * of segments of the current scattered packet, and update the
1875 * pointer to the last mbuf of the current packet.
1879 first_seg->nb_segs = 1;
1880 first_seg->pkt_len = rx_packet_len;
1882 first_seg->pkt_len =
1883 (uint16_t)(first_seg->pkt_len +
1885 first_seg->nb_segs++;
1886 last_seg->next = rxm;
1890 * If this is not the last buffer of the received packet,
1891 * update the pointer to the last mbuf of the current scattered
1892 * packet and continue to parse the RX ring.
1894 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1900 * This is the last buffer of the received packet. If the CRC
1901 * is not stripped by the hardware:
1902 * - Subtract the CRC length from the total packet length.
1903 * - If the last buffer only contains the whole CRC or a part
1904 * of it, free the mbuf associated with the last buffer. If part
1905 * of the CRC is also contained in the previous mbuf, subtract
1906 * the length of that CRC part from the data length of the
1910 if (unlikely(rxq->crc_len > 0)) {
1911 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1912 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1913 rte_pktmbuf_free_seg(rxm);
1914 first_seg->nb_segs--;
1915 last_seg->data_len =
1916 (uint16_t)(last_seg->data_len -
1917 (RTE_ETHER_CRC_LEN - rx_packet_len));
1918 last_seg->next = NULL;
1920 rxm->data_len = (uint16_t)(rx_packet_len -
1924 first_seg->port = rxq->port_id;
1925 first_seg->ol_flags = 0;
1926 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1927 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1928 ice_rxd_to_vlan_tci(first_seg, &rxd);
1929 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1930 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1931 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1932 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1933 ts_ns = ice_tstamp_convert_32b_64b(hw,
1934 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1935 if (ice_timestamp_dynflag > 0) {
1936 *RTE_MBUF_DYNFIELD(first_seg,
1937 ice_timestamp_dynfield_offset,
1938 rte_mbuf_timestamp_t *) = ts_ns;
1939 first_seg->ol_flags |= ice_timestamp_dynflag;
1943 if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
1944 == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1946 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
1947 first_seg->timesync = rxq->queue_id;
1948 pkt_flags |= PKT_RX_IEEE1588_PTP;
1951 first_seg->ol_flags |= pkt_flags;
1952 /* Prefetch data of first segment, if configured to do so. */
1953 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1954 first_seg->data_off));
1955 rx_pkts[nb_rx++] = first_seg;
1959 /* Record index of the next RX descriptor to probe. */
1960 rxq->rx_tail = rx_id;
1961 rxq->pkt_first_seg = first_seg;
1962 rxq->pkt_last_seg = last_seg;
1965 * If the number of free RX descriptors is greater than the RX free
1966 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1967 * register. Update the RDT with the value of the last processed RX
1968 * descriptor minus 1, to guarantee that the RDT register is never
1969 * equal to the RDH register, which creates a "full" ring situation
1970 * from the hardware point of view.
1972 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1973 if (nb_hold > rxq->rx_free_thresh) {
1974 rx_id = (uint16_t)(rx_id == 0 ?
1975 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1976 /* write TAIL register */
1977 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1980 rxq->nb_rx_hold = nb_hold;
1982 /* return the number of packets received in the burst */
1987 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1989 struct ice_adapter *ad =
1990 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1991 const uint32_t *ptypes;
1993 static const uint32_t ptypes_os[] = {
1994 /* refers to ice_get_default_pkt_type() */
1996 RTE_PTYPE_L2_ETHER_TIMESYNC,
1997 RTE_PTYPE_L2_ETHER_LLDP,
1998 RTE_PTYPE_L2_ETHER_ARP,
1999 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2000 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2003 RTE_PTYPE_L4_NONFRAG,
2007 RTE_PTYPE_TUNNEL_GRENAT,
2008 RTE_PTYPE_TUNNEL_IP,
2009 RTE_PTYPE_INNER_L2_ETHER,
2010 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2011 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2012 RTE_PTYPE_INNER_L4_FRAG,
2013 RTE_PTYPE_INNER_L4_ICMP,
2014 RTE_PTYPE_INNER_L4_NONFRAG,
2015 RTE_PTYPE_INNER_L4_SCTP,
2016 RTE_PTYPE_INNER_L4_TCP,
2017 RTE_PTYPE_INNER_L4_UDP,
2021 static const uint32_t ptypes_comms[] = {
2022 /* refers to ice_get_default_pkt_type() */
2024 RTE_PTYPE_L2_ETHER_TIMESYNC,
2025 RTE_PTYPE_L2_ETHER_LLDP,
2026 RTE_PTYPE_L2_ETHER_ARP,
2027 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2028 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2031 RTE_PTYPE_L4_NONFRAG,
2035 RTE_PTYPE_TUNNEL_GRENAT,
2036 RTE_PTYPE_TUNNEL_IP,
2037 RTE_PTYPE_INNER_L2_ETHER,
2038 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2039 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2040 RTE_PTYPE_INNER_L4_FRAG,
2041 RTE_PTYPE_INNER_L4_ICMP,
2042 RTE_PTYPE_INNER_L4_NONFRAG,
2043 RTE_PTYPE_INNER_L4_SCTP,
2044 RTE_PTYPE_INNER_L4_TCP,
2045 RTE_PTYPE_INNER_L4_UDP,
2046 RTE_PTYPE_TUNNEL_GTPC,
2047 RTE_PTYPE_TUNNEL_GTPU,
2048 RTE_PTYPE_L2_ETHER_PPPOE,
2052 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
2053 ptypes = ptypes_comms;
2057 if (dev->rx_pkt_burst == ice_recv_pkts ||
2058 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2059 dev->rx_pkt_burst == ice_recv_scattered_pkts)
2063 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2064 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2065 #ifdef CC_AVX512_SUPPORT
2066 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2067 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2068 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2069 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2071 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2072 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2073 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2074 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2082 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2084 volatile union ice_rx_flex_desc *rxdp;
2085 struct ice_rx_queue *rxq = rx_queue;
2088 if (unlikely(offset >= rxq->nb_rx_desc))
2091 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2092 return RTE_ETH_RX_DESC_UNAVAIL;
2094 desc = rxq->rx_tail + offset;
2095 if (desc >= rxq->nb_rx_desc)
2096 desc -= rxq->nb_rx_desc;
2098 rxdp = &rxq->rx_ring[desc];
2099 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2100 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2101 return RTE_ETH_RX_DESC_DONE;
2103 return RTE_ETH_RX_DESC_AVAIL;
2107 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2109 struct ice_tx_queue *txq = tx_queue;
2110 volatile uint64_t *status;
2111 uint64_t mask, expect;
2114 if (unlikely(offset >= txq->nb_tx_desc))
2117 desc = txq->tx_tail + offset;
2118 /* go to next desc that has the RS bit */
2119 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2121 if (desc >= txq->nb_tx_desc) {
2122 desc -= txq->nb_tx_desc;
2123 if (desc >= txq->nb_tx_desc)
2124 desc -= txq->nb_tx_desc;
2127 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2128 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2129 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2130 ICE_TXD_QW1_DTYPE_S);
2131 if ((*status & mask) == expect)
2132 return RTE_ETH_TX_DESC_DONE;
2134 return RTE_ETH_TX_DESC_FULL;
2138 ice_free_queues(struct rte_eth_dev *dev)
2142 PMD_INIT_FUNC_TRACE();
2144 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2145 if (!dev->data->rx_queues[i])
2147 ice_rx_queue_release(dev->data->rx_queues[i]);
2148 dev->data->rx_queues[i] = NULL;
2150 dev->data->nb_rx_queues = 0;
2152 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2153 if (!dev->data->tx_queues[i])
2155 ice_tx_queue_release(dev->data->tx_queues[i]);
2156 dev->data->tx_queues[i] = NULL;
2158 dev->data->nb_tx_queues = 0;
2161 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2162 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
2165 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2167 struct ice_tx_queue *txq;
2168 const struct rte_memzone *tz = NULL;
2170 struct rte_eth_dev *dev;
2173 PMD_DRV_LOG(ERR, "PF is not available");
2177 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2179 /* Allocate the TX queue data structure. */
2180 txq = rte_zmalloc_socket("ice fdir tx queue",
2181 sizeof(struct ice_tx_queue),
2182 RTE_CACHE_LINE_SIZE,
2185 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2186 "tx queue structure.");
2190 /* Allocate TX hardware ring descriptors. */
2191 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2192 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2194 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2195 ICE_FDIR_QUEUE_ID, ring_size,
2196 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2198 ice_tx_queue_release(txq);
2199 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2204 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2205 txq->queue_id = ICE_FDIR_QUEUE_ID;
2206 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2207 txq->vsi = pf->fdir.fdir_vsi;
2209 txq->tx_ring_dma = tz->iova;
2210 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2212 * No need to allocate a software ring or reset the FDIR
2213 * program queue; just mark the queue as configured.
2218 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2224 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2226 struct ice_rx_queue *rxq;
2227 const struct rte_memzone *rz = NULL;
2229 struct rte_eth_dev *dev;
2232 PMD_DRV_LOG(ERR, "PF is not available");
2236 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2238 /* Allocate the RX queue data structure. */
2239 rxq = rte_zmalloc_socket("ice fdir rx queue",
2240 sizeof(struct ice_rx_queue),
2241 RTE_CACHE_LINE_SIZE,
2244 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2245 "rx queue structure.");
2249 /* Allocate RX hardware ring descriptors. */
2250 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2251 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2253 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2254 ICE_FDIR_QUEUE_ID, ring_size,
2255 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2257 ice_rx_queue_release(rxq);
2258 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2263 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2264 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2265 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2266 rxq->vsi = pf->fdir.fdir_vsi;
2268 rxq->rx_ring_dma = rz->iova;
2269 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2270 sizeof(union ice_32byte_rx_desc));
2271 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2274 * No need to allocate a software ring or perform a reset for the FDIR
2275 * Rx queue; just mark the queue as configured.
2280 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2286 ice_recv_pkts(void *rx_queue,
2287 struct rte_mbuf **rx_pkts,
2290 struct ice_rx_queue *rxq = rx_queue;
2291 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2292 volatile union ice_rx_flex_desc *rxdp;
2293 union ice_rx_flex_desc rxd;
2294 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2295 struct ice_rx_entry *rxe;
2296 struct rte_mbuf *nmb; /* new allocated mbuf */
2297 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2298 uint16_t rx_id = rxq->rx_tail;
2300 uint16_t nb_hold = 0;
2301 uint16_t rx_packet_len;
2302 uint16_t rx_stat_err0;
2305 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2306 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2307 struct ice_vsi *vsi = rxq->vsi;
2308 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2310 struct ice_adapter *ad = rxq->vsi->adapter;
2312 while (nb_rx < nb_pkts) {
2313 rxdp = &rx_ring[rx_id];
2314 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2316 /* Check the DD bit first */
2317 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2321 nmb = rte_mbuf_raw_alloc(rxq->mp);
2322 if (unlikely(!nmb)) {
2323 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2326 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2329 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2331 if (unlikely(rx_id == rxq->nb_rx_desc))
2336 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2339 * fill the read format of the descriptor with the physical address of
2340 * the newly allocated mbuf: nmb
2342 rxdp->read.hdr_addr = 0;
2343 rxdp->read.pkt_addr = dma_addr;
2345 /* calculate rx_packet_len of the received pkt */
2346 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2347 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2349 /* fill old mbuf with received descriptor: rxd */
2350 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2351 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2354 rxm->pkt_len = rx_packet_len;
2355 rxm->data_len = rx_packet_len;
2356 rxm->port = rxq->port_id;
2357 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2358 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2359 ice_rxd_to_vlan_tci(rxm, &rxd);
2360 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2361 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2362 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2363 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
2364 ts_ns = ice_tstamp_convert_32b_64b(hw,
2365 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
2366 if (ice_timestamp_dynflag > 0) {
2367 *RTE_MBUF_DYNFIELD(rxm,
2368 ice_timestamp_dynfield_offset,
2369 rte_mbuf_timestamp_t *) = ts_ns;
2370 rxm->ol_flags |= ice_timestamp_dynflag;
2374 if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
2375 RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2377 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2378 rxm->timesync = rxq->queue_id;
2379 pkt_flags |= PKT_RX_IEEE1588_PTP;
2382 rxm->ol_flags |= pkt_flags;
2383 /* copy old mbuf to rx_pkts */
2384 rx_pkts[nb_rx++] = rxm;
2386 rxq->rx_tail = rx_id;
2388 * If the number of free RX descriptors is greater than the RX free
2389 * threshold of the queue, advance the receive tail register of the queue.
2390 * Update that register with the value of the last processed RX
2391 * descriptor minus 1.
2393 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2394 if (nb_hold > rxq->rx_free_thresh) {
2395 rx_id = (uint16_t)(rx_id == 0 ?
2396 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2397 /* write TAIL register */
2398 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2401 rxq->nb_rx_hold = nb_hold;
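/*
 * Illustrative example: with rx_free_thresh = 32, the tail register is
 * written only after more than 32 descriptors have been processed, so the
 * MMIO write cost is amortized over the whole batch rather than paid per
 * packet.
 */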
2403 /* return the number of packets received in the burst */
2408 ice_parse_tunneling_params(uint64_t ol_flags,
2409 union ice_tx_offload tx_offload,
2410 uint32_t *cd_tunneling)
2412 /* EIPT: External (outer) IP header type */
2413 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2414 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2415 else if (ol_flags & PKT_TX_OUTER_IPV4)
2416 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2417 else if (ol_flags & PKT_TX_OUTER_IPV6)
2418 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2420 /* EIPLEN: External (outer) IP header length, in DWords */
2421 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2422 ICE_TXD_CTX_QW0_EIPLEN_S;
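/*
 * Illustrative example: a plain 20-byte outer IPv4 header gives
 * EIPLEN = 20 >> 2 = 5 DWords; a 40-byte outer IPv6 header gives 10.
 */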
2424 /* L4TUNT: L4 Tunneling Type */
2425 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2426 case PKT_TX_TUNNEL_IPIP:
2427 /* for non UDP / GRE tunneling, set to 00b */
2429 case PKT_TX_TUNNEL_VXLAN:
2430 case PKT_TX_TUNNEL_GTP:
2431 case PKT_TX_TUNNEL_GENEVE:
2432 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2434 case PKT_TX_TUNNEL_GRE:
2435 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2438 PMD_TX_LOG(ERR, "Tunnel type not supported");
2442 /* L4TUNLEN: L4 Tunneling Length, in Words
2444 * We depend on app to set rte_mbuf.l2_len correctly.
2445 * For IP in GRE it should be set to the length of the GRE header.
2447 * For MAC in GRE or MAC in UDP it should be set to the length
2448 * of the GRE or UDP headers plus the inner MAC, up to and including
2449 * its last Ethertype.
2450 * If MPLS labels exist, they should be included as well.
2452 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2453 ICE_TXD_CTX_QW0_NATLEN_S;
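/*
 * Illustrative example (VXLAN, i.e. MAC in UDP): l2_len would cover
 * UDP (8) + VXLAN (8) + inner Ethernet (14) = 30 bytes, giving
 * L4TUNLEN = 30 >> 1 = 15 words.
 */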
2456 * Calculate the tunneling UDP checksum.
2457 * Shall be set only if L4TUNT = 01b and EIPT is not zero
2459 if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2460 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2461 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2465 ice_txd_enable_checksum(uint64_t ol_flags,
2467 uint32_t *td_offset,
2468 union ice_tx_offload tx_offload)
2471 if (ol_flags & PKT_TX_TUNNEL_MASK)
2472 *td_offset |= (tx_offload.outer_l2_len >> 1)
2473 << ICE_TX_DESC_LEN_MACLEN_S;
2475 *td_offset |= (tx_offload.l2_len >> 1)
2476 << ICE_TX_DESC_LEN_MACLEN_S;
2478 /* Enable L3 checksum offloads */
2479 if (ol_flags & PKT_TX_IP_CKSUM) {
2480 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2481 *td_offset |= (tx_offload.l3_len >> 2) <<
2482 ICE_TX_DESC_LEN_IPLEN_S;
2483 } else if (ol_flags & PKT_TX_IPV4) {
2484 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2485 *td_offset |= (tx_offload.l3_len >> 2) <<
2486 ICE_TX_DESC_LEN_IPLEN_S;
2487 } else if (ol_flags & PKT_TX_IPV6) {
2488 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2489 *td_offset |= (tx_offload.l3_len >> 2) <<
2490 ICE_TX_DESC_LEN_IPLEN_S;
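/*
 * IPLEN above is expressed in DWords: e.g. a 20-byte IPv4 header yields 5,
 * a 40-byte IPv6 header yields 10.
 */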
2493 if (ol_flags & PKT_TX_TCP_SEG) {
2494 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2495 *td_offset |= (tx_offload.l4_len >> 2) <<
2496 ICE_TX_DESC_LEN_L4_LEN_S;
2500 /* Enable L4 checksum offloads */
2501 switch (ol_flags & PKT_TX_L4_MASK) {
2502 case PKT_TX_TCP_CKSUM:
2503 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2504 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2505 ICE_TX_DESC_LEN_L4_LEN_S;
2507 case PKT_TX_SCTP_CKSUM:
2508 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2509 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2510 ICE_TX_DESC_LEN_L4_LEN_S;
2512 case PKT_TX_UDP_CKSUM:
2513 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2514 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2515 ICE_TX_DESC_LEN_L4_LEN_S;
2523 ice_xmit_cleanup(struct ice_tx_queue *txq)
2525 struct ice_tx_entry *sw_ring = txq->sw_ring;
2526 volatile struct ice_tx_desc *txd = txq->tx_ring;
2527 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2528 uint16_t nb_tx_desc = txq->nb_tx_desc;
2529 uint16_t desc_to_clean_to;
2530 uint16_t nb_tx_to_clean;
2532 /* Determine the last descriptor needing to be cleaned */
2533 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2534 if (desc_to_clean_to >= nb_tx_desc)
2535 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2537 /* Check to make sure the last descriptor to clean is done */
2538 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2539 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2540 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2541 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2542 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2544 txq->port_id, txq->queue_id,
2545 txd[desc_to_clean_to].cmd_type_offset_bsz);
2546 /* Failed to clean any descriptors */
2550 /* Figure out how many descriptors will be cleaned */
2551 if (last_desc_cleaned > desc_to_clean_to)
2552 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2555 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2558 /* The last descriptor to clean is done, so that means all the
2559 * descriptors from the last descriptor that was cleaned
2560 * up to the last descriptor with the RS bit set
2561 * are done. Only reset the threshold descriptor.
2563 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2565 /* Update the txq to reflect the last descriptor that was cleaned */
2566 txq->last_desc_cleaned = desc_to_clean_to;
2567 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2572 /* Construct the tx flags */
2573 static inline uint64_t
2574 ice_build_ctob(uint32_t td_cmd,
2579 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2580 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2581 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2582 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2583 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2586 /* Check if the context descriptor is needed for TX offloading */
2587 static inline uint16_t
2588 ice_calc_context_desc(uint64_t flags)
2590 static uint64_t mask = PKT_TX_TCP_SEG |
2592 PKT_TX_OUTER_IP_CKSUM |
2593 PKT_TX_TUNNEL_MASK |
2594 PKT_TX_IEEE1588_TMST;
2596 return (flags & mask) ? 1 : 0;
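/*
 * Illustrative example: a packet needing only an L4 checksum offload gets 0
 * (no context descriptor), while a TSO, tunnelled, outer-IP-checksum or
 * IEEE 1588 timestamp packet gets 1.
 */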
2599 /* set ice TSO context descriptor */
2600 static inline uint64_t
2601 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2603 uint64_t ctx_desc = 0;
2604 uint32_t cd_cmd, hdr_len, cd_tso_len;
2606 if (!tx_offload.l4_len) {
2607 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2611 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2612 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2613 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2615 cd_cmd = ICE_TX_CTX_DESC_TSO;
2616 cd_tso_len = mbuf->pkt_len - hdr_len;
2617 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2618 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2619 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
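/*
 * Illustrative example: an mbuf chain with pkt_len = 9054, l2_len = 14,
 * l3_len = 20 and l4_len = 20 has hdr_len = 54, so cd_tso_len = 9000;
 * with tso_segsz = 1460 the hardware emits ceil(9000 / 1460) = 7 segments.
 */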
2624 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2625 #define ICE_MAX_DATA_PER_TXD \
2626 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
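/*
 * ICE_MAX_DATA_PER_TXD therefore evaluates to 16383 bytes; e.g. a single
 * 45000-byte TSO segment consumes DIV_ROUND_UP(45000, 16383) = 3 data
 * descriptors in ice_calc_pkt_desc() below.
 */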
2627 /* Calculate the number of TX descriptors needed for each pkt */
2628 static inline uint16_t
2629 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2631 struct rte_mbuf *txd = tx_pkt;
2634 while (txd != NULL) {
2635 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2643 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2645 struct ice_tx_queue *txq;
2646 volatile struct ice_tx_desc *tx_ring;
2647 volatile struct ice_tx_desc *txd;
2648 struct ice_tx_entry *sw_ring;
2649 struct ice_tx_entry *txe, *txn;
2650 struct rte_mbuf *tx_pkt;
2651 struct rte_mbuf *m_seg;
2652 uint32_t cd_tunneling_params;
2657 uint32_t td_cmd = 0;
2658 uint32_t td_offset = 0;
2659 uint32_t td_tag = 0;
2662 uint64_t buf_dma_addr;
2664 union ice_tx_offload tx_offload = {0};
2667 sw_ring = txq->sw_ring;
2668 tx_ring = txq->tx_ring;
2669 tx_id = txq->tx_tail;
2670 txe = &sw_ring[tx_id];
2672 /* Check if the descriptor ring needs to be cleaned. */
2673 if (txq->nb_tx_free < txq->tx_free_thresh)
2674 (void)ice_xmit_cleanup(txq);
2676 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2677 tx_pkt = *tx_pkts++;
2682 ol_flags = tx_pkt->ol_flags;
2683 tx_offload.l2_len = tx_pkt->l2_len;
2684 tx_offload.l3_len = tx_pkt->l3_len;
2685 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2686 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2687 tx_offload.l4_len = tx_pkt->l4_len;
2688 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2689 /* Calculate the number of context descriptors needed. */
2690 nb_ctx = ice_calc_context_desc(ol_flags);
2692 /* The number of descriptors that must be allocated for
2693 * a packet equals the number of segments of that
2694 * packet, plus one context descriptor if needed.
2695 * Recalculate the needed Tx descriptors when TSO is enabled, in case
2696 * an mbuf's data size exceeds the maximum data size that HW allows
2699 if (ol_flags & PKT_TX_TCP_SEG)
2700 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2703 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2704 tx_last = (uint16_t)(tx_id + nb_used - 1);
2707 if (tx_last >= txq->nb_tx_desc)
2708 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2710 if (nb_used > txq->nb_tx_free) {
2711 if (ice_xmit_cleanup(txq) != 0) {
2716 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2717 while (nb_used > txq->nb_tx_free) {
2718 if (ice_xmit_cleanup(txq) != 0) {
2727 /* Descriptor based VLAN insertion */
2728 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2729 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2730 td_tag = tx_pkt->vlan_tci;
2733 /* Fill in tunneling parameters if necessary */
2734 cd_tunneling_params = 0;
2735 if (ol_flags & PKT_TX_TUNNEL_MASK)
2736 ice_parse_tunneling_params(ol_flags, tx_offload,
2737 &cd_tunneling_params);
2739 /* Enable checksum offloading */
2740 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2741 ice_txd_enable_checksum(ol_flags, &td_cmd,
2742 &td_offset, tx_offload);
2745 /* Setup TX context descriptor if required */
2746 volatile struct ice_tx_ctx_desc *ctx_txd =
2747 (volatile struct ice_tx_ctx_desc *)
2749 uint16_t cd_l2tag2 = 0;
2750 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2752 txn = &sw_ring[txe->next_id];
2753 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2755 rte_pktmbuf_free_seg(txe->mbuf);
2759 if (ol_flags & PKT_TX_TCP_SEG)
2760 cd_type_cmd_tso_mss |=
2761 ice_set_tso_ctx(tx_pkt, tx_offload);
2762 else if (ol_flags & PKT_TX_IEEE1588_TMST)
2763 cd_type_cmd_tso_mss |=
2764 ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
2765 ICE_TXD_CTX_QW1_CMD_S);
2767 ctx_txd->tunneling_params =
2768 rte_cpu_to_le_32(cd_tunneling_params);
2770 /* TX context descriptor based double VLAN insert */
2771 if (ol_flags & PKT_TX_QINQ) {
2772 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2773 cd_type_cmd_tso_mss |=
2774 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2775 ICE_TXD_CTX_QW1_CMD_S);
2777 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2779 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2781 txe->last_id = tx_last;
2782 tx_id = txe->next_id;
2788 txd = &tx_ring[tx_id];
2789 txn = &sw_ring[txe->next_id];
2792 rte_pktmbuf_free_seg(txe->mbuf);
2795 /* Setup TX Descriptor */
2796 slen = m_seg->data_len;
2797 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2799 while ((ol_flags & PKT_TX_TCP_SEG) &&
2800 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2801 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2802 txd->cmd_type_offset_bsz =
2803 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2804 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2805 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2806 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2807 ICE_TXD_QW1_TX_BUF_SZ_S) |
2808 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2810 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2811 slen -= ICE_MAX_DATA_PER_TXD;
2813 txe->last_id = tx_last;
2814 tx_id = txe->next_id;
2816 txd = &tx_ring[tx_id];
2817 txn = &sw_ring[txe->next_id];
2820 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2821 txd->cmd_type_offset_bsz =
2822 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2823 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2824 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2825 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2826 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2828 txe->last_id = tx_last;
2829 tx_id = txe->next_id;
2831 m_seg = m_seg->next;
2834 /* fill the last descriptor with End of Packet (EOP) bit */
2835 td_cmd |= ICE_TX_DESC_CMD_EOP;
2836 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2837 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2839 /* set RS bit on the last descriptor of one packet */
2840 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2842 "Setting RS bit on TXD id="
2843 "%4u (port=%d queue=%d)",
2844 tx_last, txq->port_id, txq->queue_id);
2846 td_cmd |= ICE_TX_DESC_CMD_RS;
2848 /* Update txq RS bit counters */
2849 txq->nb_tx_used = 0;
2851 txd->cmd_type_offset_bsz |=
2852 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2856 /* update Tail register */
2857 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2858 txq->tx_tail = tx_id;
2863 static __rte_always_inline int
2864 ice_tx_free_bufs(struct ice_tx_queue *txq)
2866 struct ice_tx_entry *txep;
2869 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2870 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2871 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2874 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2876 for (i = 0; i < txq->tx_rs_thresh; i++)
2877 rte_prefetch0((txep + i)->mbuf);
2879 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2880 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2881 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2885 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2886 rte_pktmbuf_free_seg(txep->mbuf);
2891 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2892 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2893 if (txq->tx_next_dd >= txq->nb_tx_desc)
2894 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2896 return txq->tx_rs_thresh;
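/*
 * Note: with DEV_TX_OFFLOAD_MBUF_FAST_FREE the mbufs above are returned
 * straight to their mempool, skipping the per-segment reference-count and
 * chain handling performed by rte_pktmbuf_free_seg() on the slower path.
 */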
2900 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2903 struct ice_tx_entry *swr_ring = txq->sw_ring;
2904 uint16_t i, tx_last, tx_id;
2905 uint16_t nb_tx_free_last;
2906 uint16_t nb_tx_to_clean;
2909 /* Start free mbuf from the next of tx_tail */
2910 tx_last = txq->tx_tail;
2911 tx_id = swr_ring[tx_last].next_id;
2913 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2916 nb_tx_to_clean = txq->nb_tx_free;
2917 nb_tx_free_last = txq->nb_tx_free;
2919 free_cnt = txq->nb_tx_desc;
2921 /* Loop through swr_ring to count the number of
2922 * freeable mbufs and packets.
2924 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2925 for (i = 0; i < nb_tx_to_clean &&
2926 pkt_cnt < free_cnt &&
2927 tx_id != tx_last; i++) {
2928 if (swr_ring[tx_id].mbuf != NULL) {
2929 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2930 swr_ring[tx_id].mbuf = NULL;
2933 * last segment in the packet,
2934 * increment packet count
2936 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2939 tx_id = swr_ring[tx_id].next_id;
2942 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2943 txq->nb_tx_free || tx_id == tx_last)
2946 if (pkt_cnt < free_cnt) {
2947 if (ice_xmit_cleanup(txq))
2950 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2951 nb_tx_free_last = txq->nb_tx_free;
2955 return (int)pkt_cnt;
2960 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2961 uint32_t free_cnt __rte_unused)
2968 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2973 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2974 free_cnt = txq->nb_tx_desc;
2976 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2978 for (i = 0; i < cnt; i += n) {
2979 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2982 n = ice_tx_free_bufs(txq);
2992 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2994 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2995 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2996 struct ice_adapter *ad =
2997 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3000 if (ad->tx_vec_allowed)
3001 return ice_tx_done_cleanup_vec(q, free_cnt);
3003 if (ad->tx_simple_allowed)
3004 return ice_tx_done_cleanup_simple(q, free_cnt);
3006 return ice_tx_done_cleanup_full(q, free_cnt);
3009 /* Populate 4 descriptors with data from 4 mbufs */
3011 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3016 for (i = 0; i < 4; i++, txdp++, pkts++) {
3017 dma_addr = rte_mbuf_data_iova(*pkts);
3018 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3019 txdp->cmd_type_offset_bsz =
3020 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3021 (*pkts)->data_len, 0);
3025 /* Populate 1 descriptor with data from 1 mbuf */
3027 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3031 dma_addr = rte_mbuf_data_iova(*pkts);
3032 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3033 txdp->cmd_type_offset_bsz =
3034 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3035 (*pkts)->data_len, 0);
3039 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
3042 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
3043 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3044 const int N_PER_LOOP = 4;
3045 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3046 int mainpart, leftover;
3050 * Process most of the packets in chunks of N pkts. Any
3051 * leftover packets will get processed one at a time.
3053 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3054 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
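/*
 * Illustrative example: nb_pkts = 10 gives mainpart = 8 (two tx4() calls)
 * and leftover = 2 (two tx1() calls).
 */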
3055 for (i = 0; i < mainpart; i += N_PER_LOOP) {
3056 /* Copy N mbuf pointers to the S/W ring */
3057 for (j = 0; j < N_PER_LOOP; ++j)
3058 (txep + i + j)->mbuf = *(pkts + i + j);
3059 tx4(txdp + i, pkts + i);
3062 if (unlikely(leftover > 0)) {
3063 for (i = 0; i < leftover; ++i) {
3064 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3065 tx1(txdp + mainpart + i, pkts + mainpart + i);
3070 static inline uint16_t
3071 tx_xmit_pkts(struct ice_tx_queue *txq,
3072 struct rte_mbuf **tx_pkts,
3075 volatile struct ice_tx_desc *txr = txq->tx_ring;
3079 * Begin scanning the H/W ring for done descriptors when the number
3080 * of available descriptors drops below tx_free_thresh. For each done
3081 * descriptor, free the associated buffer.
3083 if (txq->nb_tx_free < txq->tx_free_thresh)
3084 ice_tx_free_bufs(txq);
3086 /* Use available descriptor only */
3087 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3088 if (unlikely(!nb_pkts))
3091 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
3092 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3093 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3094 ice_tx_fill_hw_ring(txq, tx_pkts, n);
3095 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3096 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3098 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3102 /* Fill hardware descriptor ring with mbuf data */
3103 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3104 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3106 /* Determine whether the RS bit needs to be set */
3107 if (txq->tx_tail > txq->tx_next_rs) {
3108 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3109 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3112 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3113 if (txq->tx_next_rs >= txq->nb_tx_desc)
3114 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
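/*
 * Illustrative example: with tx_rs_thresh = 32, RS is requested on
 * descriptors 31, 63, 95, ... so the hardware reports completion once per
 * 32 descriptors instead of once per packet.
 */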
3117 if (txq->tx_tail >= txq->nb_tx_desc)
3120 /* Update the tx tail register */
3121 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3127 ice_xmit_pkts_simple(void *tx_queue,
3128 struct rte_mbuf **tx_pkts,
3133 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3134 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3138 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3141 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3142 &tx_pkts[nb_tx], num);
3143 nb_tx = (uint16_t)(nb_tx + ret);
3144 nb_pkts = (uint16_t)(nb_pkts - ret);
3153 ice_set_rx_function(struct rte_eth_dev *dev)
3155 PMD_INIT_FUNC_TRACE();
3156 struct ice_adapter *ad =
3157 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3159 struct ice_rx_queue *rxq;
3161 int rx_check_ret = -1;
3163 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3164 ad->rx_use_avx512 = false;
3165 ad->rx_use_avx2 = false;
3166 rx_check_ret = ice_rx_vec_dev_check(dev);
3169 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3170 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3171 ad->rx_vec_allowed = true;
3172 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3173 rxq = dev->data->rx_queues[i];
3174 if (rxq && ice_rxq_vec_setup(rxq)) {
3175 ad->rx_vec_allowed = false;
3180 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3181 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3182 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3183 #ifdef CC_AVX512_SUPPORT
3184 ad->rx_use_avx512 = true;
3187 "AVX512 is not supported in build env");
3189 if (!ad->rx_use_avx512 &&
3190 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3191 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3192 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3193 ad->rx_use_avx2 = true;
3196 ad->rx_vec_allowed = false;
3200 if (ad->rx_vec_allowed) {
3201 if (dev->data->scattered_rx) {
3202 if (ad->rx_use_avx512) {
3203 #ifdef CC_AVX512_SUPPORT
3204 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3206 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3207 dev->data->port_id);
3209 ice_recv_scattered_pkts_vec_avx512_offload;
3212 "Using AVX512 Vector Scattered Rx (port %d).",
3213 dev->data->port_id);
3215 ice_recv_scattered_pkts_vec_avx512;
3218 } else if (ad->rx_use_avx2) {
3219 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3221 "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3222 dev->data->port_id);
3224 ice_recv_scattered_pkts_vec_avx2_offload;
3227 "Using AVX2 Vector Scattered Rx (port %d).",
3228 dev->data->port_id);
3230 ice_recv_scattered_pkts_vec_avx2;
3234 "Using Vector Scattered Rx (port %d).",
3235 dev->data->port_id);
3236 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3239 if (ad->rx_use_avx512) {
3240 #ifdef CC_AVX512_SUPPORT
3241 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3243 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3244 dev->data->port_id);
3246 ice_recv_pkts_vec_avx512_offload;
3249 "Using AVX512 Vector Rx (port %d).",
3250 dev->data->port_id);
3252 ice_recv_pkts_vec_avx512;
3255 } else if (ad->rx_use_avx2) {
3256 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3258 "Using AVX2 OFFLOAD Vector Rx (port %d).",
3259 dev->data->port_id);
3261 ice_recv_pkts_vec_avx2_offload;
3264 "Using AVX2 Vector Rx (port %d).",
3265 dev->data->port_id);
3267 ice_recv_pkts_vec_avx2;
3271 "Using Vector Rx (port %d).",
3272 dev->data->port_id);
3273 dev->rx_pkt_burst = ice_recv_pkts_vec;
3281 if (dev->data->scattered_rx) {
3282 /* Set the non-LRO scattered function */
3284 "Using a Scattered function on port %d.",
3285 dev->data->port_id);
3286 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3287 } else if (ad->rx_bulk_alloc_allowed) {
3289 "Rx Burst Bulk Alloc Preconditions are "
3290 "satisfied. Rx Burst Bulk Alloc function "
3291 "will be used on port %d.",
3292 dev->data->port_id);
3293 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3296 "Rx Burst Bulk Alloc Preconditions are not "
3297 "satisfied, Normal Rx will be used on port %d.",
3298 dev->data->port_id);
3299 dev->rx_pkt_burst = ice_recv_pkts;
3303 static const struct {
3304 eth_rx_burst_t pkt_burst;
3306 } ice_rx_burst_infos[] = {
3307 { ice_recv_scattered_pkts, "Scalar Scattered" },
3308 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3309 { ice_recv_pkts, "Scalar" },
3311 #ifdef CC_AVX512_SUPPORT
3312 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3313 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3314 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3315 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3317 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3318 { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3319 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3320 { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
3321 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3322 { ice_recv_pkts_vec, "Vector SSE" },
3327 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3328 struct rte_eth_burst_mode *mode)
3330 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3334 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3335 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3336 snprintf(mode->info, sizeof(mode->info), "%s",
3337 ice_rx_burst_infos[i].info);
3347 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3349 struct ice_adapter *ad =
3350 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3352 /* Use a simple Tx queue if possible (only fast free is allowed) */
3353 ad->tx_simple_allowed =
3355 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3356 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3358 if (ad->tx_simple_allowed)
3359 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3363 "Simple Tx can NOT be enabled on Tx queue %u.",
3367 /*********************************************************************
3371 **********************************************************************/
3372 /* The default values of TSO MSS */
3373 #define ICE_MIN_TSO_MSS 64
3374 #define ICE_MAX_TSO_MSS 9728
3375 #define ICE_MAX_TSO_FRAME_SIZE 262144
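/*
 * i.e. ice_prep_pkts() below accepts TSO requests only with an MSS in the
 * [64, 9728] byte range and a total frame size of at most 256 KB.
 */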
3377 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3384 for (i = 0; i < nb_pkts; i++) {
3386 ol_flags = m->ol_flags;
3388 if (ol_flags & PKT_TX_TCP_SEG &&
3389 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3390 m->tso_segsz > ICE_MAX_TSO_MSS ||
3391 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3393 * MSS outside this range is considered malicious
3399 #ifdef RTE_ETHDEV_DEBUG_TX
3400 ret = rte_validate_tx_offload(m);
3406 ret = rte_net_intel_cksum_prepare(m);
3416 ice_set_tx_function(struct rte_eth_dev *dev)
3418 struct ice_adapter *ad =
3419 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3421 struct ice_tx_queue *txq;
3423 int tx_check_ret = -1;
3425 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3426 ad->tx_use_avx2 = false;
3427 ad->tx_use_avx512 = false;
3428 tx_check_ret = ice_tx_vec_dev_check(dev);
3429 if (tx_check_ret >= 0 &&
3430 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3431 ad->tx_vec_allowed = true;
3433 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3434 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3435 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3436 #ifdef CC_AVX512_SUPPORT
3437 ad->tx_use_avx512 = true;
3440 "AVX512 is not supported in build env");
3442 if (!ad->tx_use_avx512 &&
3443 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3444 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3445 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3446 ad->tx_use_avx2 = true;
3448 if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3449 tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3450 ad->tx_vec_allowed = false;
3452 if (ad->tx_vec_allowed) {
3453 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3454 txq = dev->data->tx_queues[i];
3455 if (txq && ice_txq_vec_setup(txq)) {
3456 ad->tx_vec_allowed = false;
3462 ad->tx_vec_allowed = false;
3466 if (ad->tx_vec_allowed) {
3467 dev->tx_pkt_prepare = NULL;
3468 if (ad->tx_use_avx512) {
3469 #ifdef CC_AVX512_SUPPORT
3470 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3472 "Using AVX512 OFFLOAD Vector Tx (port %d).",
3473 dev->data->port_id);
3475 ice_xmit_pkts_vec_avx512_offload;
3476 dev->tx_pkt_prepare = ice_prep_pkts;
3479 "Using AVX512 Vector Tx (port %d).",
3480 dev->data->port_id);
3481 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3485 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3487 "Using AVX2 OFFLOAD Vector Tx (port %d).",
3488 dev->data->port_id);
3490 ice_xmit_pkts_vec_avx2_offload;
3491 dev->tx_pkt_prepare = ice_prep_pkts;
3493 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3494 ad->tx_use_avx2 ? "avx2 " : "",
3495 dev->data->port_id);
3496 dev->tx_pkt_burst = ad->tx_use_avx2 ?
3497 ice_xmit_pkts_vec_avx2 :
3506 if (ad->tx_simple_allowed) {
3507 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3508 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3509 dev->tx_pkt_prepare = NULL;
3511 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3512 dev->tx_pkt_burst = ice_xmit_pkts;
3513 dev->tx_pkt_prepare = ice_prep_pkts;
3517 static const struct {
3518 eth_tx_burst_t pkt_burst;
3520 } ice_tx_burst_infos[] = {
3521 { ice_xmit_pkts_simple, "Scalar Simple" },
3522 { ice_xmit_pkts, "Scalar" },
3524 #ifdef CC_AVX512_SUPPORT
3525 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3526 { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3528 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3529 { ice_xmit_pkts_vec, "Vector SSE" },
3534 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3535 struct rte_eth_burst_mode *mode)
3537 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3541 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3542 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3543 snprintf(mode->info, sizeof(mode->info), "%s",
3544 ice_tx_burst_infos[i].info);
3553 /* For the meaning of each value, the hardware datasheet can provide more details.
3555 * @note: fix ice_dev_supported_ptypes_get() if any change here.
3557 static inline uint32_t
3558 ice_get_default_pkt_type(uint16_t ptype)
3560 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3561 __rte_cache_aligned = {
3564 [1] = RTE_PTYPE_L2_ETHER,
3565 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3566 /* [3] - [5] reserved */
3567 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3568 /* [7] - [10] reserved */
3569 [11] = RTE_PTYPE_L2_ETHER_ARP,
3570 /* [12] - [21] reserved */
3572 /* Non tunneled IPv4 */
3573 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3575 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3576 RTE_PTYPE_L4_NONFRAG,
3577 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3580 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3582 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3584 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3588 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3589 RTE_PTYPE_TUNNEL_IP |
3590 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3591 RTE_PTYPE_INNER_L4_FRAG,
3592 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3593 RTE_PTYPE_TUNNEL_IP |
3594 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3595 RTE_PTYPE_INNER_L4_NONFRAG,
3596 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3597 RTE_PTYPE_TUNNEL_IP |
3598 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3599 RTE_PTYPE_INNER_L4_UDP,
3601 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3602 RTE_PTYPE_TUNNEL_IP |
3603 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3604 RTE_PTYPE_INNER_L4_TCP,
3605 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3606 RTE_PTYPE_TUNNEL_IP |
3607 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3608 RTE_PTYPE_INNER_L4_SCTP,
3609 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3610 RTE_PTYPE_TUNNEL_IP |
3611 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3612 RTE_PTYPE_INNER_L4_ICMP,
3615 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3616 RTE_PTYPE_TUNNEL_IP |
3617 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3618 RTE_PTYPE_INNER_L4_FRAG,
3619 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3620 RTE_PTYPE_TUNNEL_IP |
3621 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3622 RTE_PTYPE_INNER_L4_NONFRAG,
3623 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3624 RTE_PTYPE_TUNNEL_IP |
3625 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3626 RTE_PTYPE_INNER_L4_UDP,
3628 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3629 RTE_PTYPE_TUNNEL_IP |
3630 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3631 RTE_PTYPE_INNER_L4_TCP,
3632 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3633 RTE_PTYPE_TUNNEL_IP |
3634 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3635 RTE_PTYPE_INNER_L4_SCTP,
3636 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3637 RTE_PTYPE_TUNNEL_IP |
3638 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3639 RTE_PTYPE_INNER_L4_ICMP,
3641 /* IPv4 --> GRE/Teredo/VXLAN */
3642 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3643 RTE_PTYPE_TUNNEL_GRENAT,
3645 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3646 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3647 RTE_PTYPE_TUNNEL_GRENAT |
3648 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3649 RTE_PTYPE_INNER_L4_FRAG,
3650 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3651 RTE_PTYPE_TUNNEL_GRENAT |
3652 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3653 RTE_PTYPE_INNER_L4_NONFRAG,
3654 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3655 RTE_PTYPE_TUNNEL_GRENAT |
3656 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3657 RTE_PTYPE_INNER_L4_UDP,
3659 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3660 RTE_PTYPE_TUNNEL_GRENAT |
3661 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3662 RTE_PTYPE_INNER_L4_TCP,
3663 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3664 RTE_PTYPE_TUNNEL_GRENAT |
3665 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3666 RTE_PTYPE_INNER_L4_SCTP,
3667 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3668 RTE_PTYPE_TUNNEL_GRENAT |
3669 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3670 RTE_PTYPE_INNER_L4_ICMP,
3672 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3673 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3674 RTE_PTYPE_TUNNEL_GRENAT |
3675 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3676 RTE_PTYPE_INNER_L4_FRAG,
3677 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3678 RTE_PTYPE_TUNNEL_GRENAT |
3679 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3680 RTE_PTYPE_INNER_L4_NONFRAG,
3681 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3682 RTE_PTYPE_TUNNEL_GRENAT |
3683 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3684 RTE_PTYPE_INNER_L4_UDP,
3686 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3687 RTE_PTYPE_TUNNEL_GRENAT |
3688 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3689 RTE_PTYPE_INNER_L4_TCP,
3690 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3691 RTE_PTYPE_TUNNEL_GRENAT |
3692 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3693 RTE_PTYPE_INNER_L4_SCTP,
3694 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3695 RTE_PTYPE_TUNNEL_GRENAT |
3696 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3697 RTE_PTYPE_INNER_L4_ICMP,
3699 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3700 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3701 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3703 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3704 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3705 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3706 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3707 RTE_PTYPE_INNER_L4_FRAG,
3708 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3709 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3710 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3711 RTE_PTYPE_INNER_L4_NONFRAG,
3712 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3713 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3714 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3715 RTE_PTYPE_INNER_L4_UDP,
3717 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3718 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3719 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3720 RTE_PTYPE_INNER_L4_TCP,
3721 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3722 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3723 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3724 RTE_PTYPE_INNER_L4_SCTP,
3725 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3726 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3727 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3728 RTE_PTYPE_INNER_L4_ICMP,
3730 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3731 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3732 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3733 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3734 RTE_PTYPE_INNER_L4_FRAG,
3735 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3736 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3737 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3738 RTE_PTYPE_INNER_L4_NONFRAG,
3739 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3740 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3741 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3742 RTE_PTYPE_INNER_L4_UDP,
3744 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3745 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3746 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3747 RTE_PTYPE_INNER_L4_TCP,
3748 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3749 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3750 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3751 RTE_PTYPE_INNER_L4_SCTP,
3752 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3753 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3754 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3755 RTE_PTYPE_INNER_L4_ICMP,
3756 /* [73] - [87] reserved */
3758 /* Non tunneled IPv6 */
3759 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3761 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3762 RTE_PTYPE_L4_NONFRAG,
3763 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3766 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3768 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3770 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3774 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3775 RTE_PTYPE_TUNNEL_IP |
3776 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3777 RTE_PTYPE_INNER_L4_FRAG,
3778 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3779 RTE_PTYPE_TUNNEL_IP |
3780 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3781 RTE_PTYPE_INNER_L4_NONFRAG,
3782 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3783 RTE_PTYPE_TUNNEL_IP |
3784 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3785 RTE_PTYPE_INNER_L4_UDP,
3787 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3788 RTE_PTYPE_TUNNEL_IP |
3789 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3790 RTE_PTYPE_INNER_L4_TCP,
3791 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3792 RTE_PTYPE_TUNNEL_IP |
3793 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3794 RTE_PTYPE_INNER_L4_SCTP,
3795 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3796 RTE_PTYPE_TUNNEL_IP |
3797 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3798 RTE_PTYPE_INNER_L4_ICMP,
3801 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3802 RTE_PTYPE_TUNNEL_IP |
3803 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3804 RTE_PTYPE_INNER_L4_FRAG,
3805 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3806 RTE_PTYPE_TUNNEL_IP |
3807 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3808 RTE_PTYPE_INNER_L4_NONFRAG,
3809 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3810 RTE_PTYPE_TUNNEL_IP |
3811 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3812 RTE_PTYPE_INNER_L4_UDP,
3813 /* [105] reserved */
3814 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3815 RTE_PTYPE_TUNNEL_IP |
3816 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3817 RTE_PTYPE_INNER_L4_TCP,
3818 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3819 RTE_PTYPE_TUNNEL_IP |
3820 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3821 RTE_PTYPE_INNER_L4_SCTP,
3822 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3823 RTE_PTYPE_TUNNEL_IP |
3824 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3825 RTE_PTYPE_INNER_L4_ICMP,
3827 /* IPv6 --> GRE/Teredo/VXLAN */
3828 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3829 RTE_PTYPE_TUNNEL_GRENAT,
3831 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3832 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3833 RTE_PTYPE_TUNNEL_GRENAT |
3834 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3835 RTE_PTYPE_INNER_L4_FRAG,
3836 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3837 RTE_PTYPE_TUNNEL_GRENAT |
3838 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3839 RTE_PTYPE_INNER_L4_NONFRAG,
3840 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3841 RTE_PTYPE_TUNNEL_GRENAT |
3842 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3843 RTE_PTYPE_INNER_L4_UDP,
3844 /* [113] reserved */
3845 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3846 RTE_PTYPE_TUNNEL_GRENAT |
3847 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3848 RTE_PTYPE_INNER_L4_TCP,
3849 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3850 RTE_PTYPE_TUNNEL_GRENAT |
3851 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3852 RTE_PTYPE_INNER_L4_SCTP,
3853 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3854 RTE_PTYPE_TUNNEL_GRENAT |
3855 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3856 RTE_PTYPE_INNER_L4_ICMP,
3858 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3859 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3860 RTE_PTYPE_TUNNEL_GRENAT |
3861 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3862 RTE_PTYPE_INNER_L4_FRAG,
3863 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3864 RTE_PTYPE_TUNNEL_GRENAT |
3865 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3866 RTE_PTYPE_INNER_L4_NONFRAG,
3867 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3868 RTE_PTYPE_TUNNEL_GRENAT |
3869 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3870 RTE_PTYPE_INNER_L4_UDP,
3871 /* [120] reserved */
3872 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3873 RTE_PTYPE_TUNNEL_GRENAT |
3874 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3875 RTE_PTYPE_INNER_L4_TCP,
3876 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3877 RTE_PTYPE_TUNNEL_GRENAT |
3878 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3879 RTE_PTYPE_INNER_L4_SCTP,
3880 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3881 RTE_PTYPE_TUNNEL_GRENAT |
3882 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3883 RTE_PTYPE_INNER_L4_ICMP,
3885 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3886 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3887 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3889 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3890 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3891 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3892 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3893 RTE_PTYPE_INNER_L4_FRAG,
3894 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3895 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3896 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3897 RTE_PTYPE_INNER_L4_NONFRAG,
3898 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3899 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3900 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3901 RTE_PTYPE_INNER_L4_UDP,
3902 /* [128] reserved */
3903 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3904 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3905 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3906 RTE_PTYPE_INNER_L4_TCP,
3907 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3908 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3909 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3910 RTE_PTYPE_INNER_L4_SCTP,
3911 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3912 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3913 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3914 RTE_PTYPE_INNER_L4_ICMP,
3916 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3917 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3918 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3919 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3920 RTE_PTYPE_INNER_L4_FRAG,
3921 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3922 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3923 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3924 RTE_PTYPE_INNER_L4_NONFRAG,
3925 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3926 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3927 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3928 RTE_PTYPE_INNER_L4_UDP,
3929 /* [135] reserved */
3930 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3931 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3932 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3933 RTE_PTYPE_INNER_L4_TCP,
3934 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3935 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3936 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3937 RTE_PTYPE_INNER_L4_SCTP,
3938 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3939 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3940 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3941 RTE_PTYPE_INNER_L4_ICMP,
3942 /* [139] - [299] reserved */
3945 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3946 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3948 /* PPPoE --> IPv4 */
3949 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3950 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3952 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3953 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3954 RTE_PTYPE_L4_NONFRAG,
3955 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3956 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3958 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3959 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3961 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3962 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3964 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3965 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3968 /* PPPoE --> IPv6 */
3969 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3970 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3972 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3973 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3974 RTE_PTYPE_L4_NONFRAG,
3975 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3976 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3978 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3979 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3981 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3982 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3984 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3985 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3987 /* [314] - [324] reserved */
3989 /* IPv4/IPv6 --> GTPC/GTPU */
3990 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3991 RTE_PTYPE_TUNNEL_GTPC,
3992 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3993 RTE_PTYPE_TUNNEL_GTPC,
3994 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3995 RTE_PTYPE_TUNNEL_GTPC,
3996 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3997 RTE_PTYPE_TUNNEL_GTPC,
3998 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3999 RTE_PTYPE_TUNNEL_GTPU,
4000 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4001 RTE_PTYPE_TUNNEL_GTPU,
4003 /* IPv4 --> GTPU --> IPv4 */
4004 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4005 RTE_PTYPE_TUNNEL_GTPU |
4006 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4007 RTE_PTYPE_INNER_L4_FRAG,
4008 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4009 RTE_PTYPE_TUNNEL_GTPU |
4010 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4011 RTE_PTYPE_INNER_L4_NONFRAG,
4012 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4013 RTE_PTYPE_TUNNEL_GTPU |
4014 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4015 RTE_PTYPE_INNER_L4_UDP,
4016 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4017 RTE_PTYPE_TUNNEL_GTPU |
4018 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4019 RTE_PTYPE_INNER_L4_TCP,
4020 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4021 RTE_PTYPE_TUNNEL_GTPU |
4022 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4023 RTE_PTYPE_INNER_L4_ICMP,
4025 /* IPv6 --> GTPU --> IPv4 */
4026 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4027 RTE_PTYPE_TUNNEL_GTPU |
4028 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4029 RTE_PTYPE_INNER_L4_FRAG,
4030 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4031 RTE_PTYPE_TUNNEL_GTPU |
4032 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4033 RTE_PTYPE_INNER_L4_NONFRAG,
4034 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4035 RTE_PTYPE_TUNNEL_GTPU |
4036 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4037 RTE_PTYPE_INNER_L4_UDP,
4038 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4039 RTE_PTYPE_TUNNEL_GTPU |
4040 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4041 RTE_PTYPE_INNER_L4_TCP,
4042 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4043 RTE_PTYPE_TUNNEL_GTPU |
4044 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4045 RTE_PTYPE_INNER_L4_ICMP,
4047 /* IPv4 --> GTPU --> IPv6 */
4048 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4049 RTE_PTYPE_TUNNEL_GTPU |
4050 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4051 RTE_PTYPE_INNER_L4_FRAG,
4052 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4053 RTE_PTYPE_TUNNEL_GTPU |
4054 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4055 RTE_PTYPE_INNER_L4_NONFRAG,
4056 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4057 RTE_PTYPE_TUNNEL_GTPU |
4058 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4059 RTE_PTYPE_INNER_L4_UDP,
4060 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4061 RTE_PTYPE_TUNNEL_GTPU |
4062 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4063 RTE_PTYPE_INNER_L4_TCP,
4064 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4065 RTE_PTYPE_TUNNEL_GTPU |
4066 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4067 RTE_PTYPE_INNER_L4_ICMP,
4069 /* IPv6 --> GTPU --> IPv6 */
4070 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4071 RTE_PTYPE_TUNNEL_GTPU |
4072 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4073 RTE_PTYPE_INNER_L4_FRAG,
4074 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4075 RTE_PTYPE_TUNNEL_GTPU |
4076 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4077 RTE_PTYPE_INNER_L4_NONFRAG,
4078 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4079 RTE_PTYPE_TUNNEL_GTPU |
4080 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4081 RTE_PTYPE_INNER_L4_UDP,
4082 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4083 RTE_PTYPE_TUNNEL_GTPU |
4084 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4085 RTE_PTYPE_INNER_L4_TCP,
4086 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4087 RTE_PTYPE_TUNNEL_GTPU |
4088 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4089 RTE_PTYPE_INNER_L4_ICMP,
4091 /* IPv4 --> UDP ECPRI */
4092 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4094 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4096 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4098 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4100 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4102 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4104 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4106 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4108 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4110 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4113 /* IPV6 --> UDP ECPRI */
4114 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4116 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4118 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4120 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4122 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4124 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4126 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4128 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4130 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4132 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4134 /* All others reserved */
4137 return type_table[ptype];
4141 ice_set_default_ptype_table(struct rte_eth_dev *dev)
4143 struct ice_adapter *ad =
4144 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4147 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4148 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4151 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
4152 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
4153 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4154 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4155 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4157 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
4158 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
4159 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4160 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
4161 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
4162 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4165 * Check the programming status descriptor in the Rx queue,
4166 * done after a Flow Director filter has been programmed on the Tx queue.
4170 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4172 volatile union ice_32byte_rx_desc *rxdp;
4179 rxdp = (volatile union ice_32byte_rx_desc *)
4180 (&rxq->rx_ring[rxq->rx_tail]);
4181 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4182 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4183 >> ICE_RXD_QW1_STATUS_S;
4185 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4187 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4188 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4189 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4190 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4192 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4193 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4194 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4195 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4199 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4200 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4202 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4206 rxdp->wb.qword1.status_error_len = 0;
4208 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4210 if (rxq->rx_tail == 0)
4211 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4213 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4219 #define ICE_FDIR_MAX_WAIT_US 10000
4222 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4224 struct ice_tx_queue *txq = pf->fdir.txq;
4225 struct ice_rx_queue *rxq = pf->fdir.rxq;
4226 volatile struct ice_fltr_desc *fdirdp;
4227 volatile struct ice_tx_desc *txdp;
4231 fdirdp = (volatile struct ice_fltr_desc *)
4232 (&txq->tx_ring[txq->tx_tail]);
4233 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4234 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4236 txdp = &txq->tx_ring[txq->tx_tail + 1];
4237 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4238 td_cmd = ICE_TX_DESC_CMD_EOP |
4239 ICE_TX_DESC_CMD_RS |
4240 ICE_TX_DESC_CMD_DUMMY;
4242 txdp->cmd_type_offset_bsz =
4243 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4246 if (txq->tx_tail >= txq->nb_tx_desc)
4248 /* Update the tx tail register */
4249 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
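/*
 * Poll below for the descriptor-done (DD) dtype; ICE_FDIR_MAX_WAIT_US bounds
 * the wait at 10000 iterations (nominally ~10 ms, assuming roughly 1 us per
 * poll iteration).
 */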
4250 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4251 if ((txdp->cmd_type_offset_bsz &
4252 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4253 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4257 if (i >= ICE_FDIR_MAX_WAIT_US) {
4259 "Failed to program FDIR filter: time out to get DD on tx queue.");
4263 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4266 ret = ice_check_fdir_programming_status(rxq);
4274 "Failed to program FDIR filter: programming status reported.");