1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <ethdev_driver.h>
9 #include "rte_pmd_ice.h"
11 #include "ice_rxtx_vec_common.h"
13 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
17 PKT_TX_OUTER_IP_CKSUM)
19 /* Offset of mbuf dynamic field for protocol extraction data */
20 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
22 /* Mask of mbuf dynamic flags for protocol extraction type */
23 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
28 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
31 ice_monitor_callback(const uint64_t value,
32 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
34 const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
36 * we expect the DD bit to be set to 1 if this descriptor was already
39 return (value & m) == m ? -1 : 0;
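/* Note: per the rte_power_monitor() callback contract, a non-zero return
 * aborts entry into the power-optimized state because the descriptor has
 * already been written back and work is pending; returning 0 lets the core
 * sleep until the monitored status word changes.
 */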
43 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
45 volatile union ice_rx_flex_desc *rxdp;
46 struct ice_rx_queue *rxq = rx_queue;
50 rxdp = &rxq->rx_ring[desc];
51 /* watch for changes in status bit */
52 pmc->addr = &rxdp->wb.status_error0;
54 /* comparison callback */
55 pmc->fn = ice_monitor_callback;
57 /* register is 16-bit */
58 pmc->size = sizeof(uint16_t);
65 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
67 static uint8_t rxdid_map[] = {
68 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
69 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
70 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
71 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
72 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
73 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
74 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
77 return xtr_type < RTE_DIM(rxdid_map) ?
78 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
82 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
84 volatile union ice_rx_flex_desc *rxdp)
86 volatile struct ice_32b_rx_flex_desc_comms *desc =
87 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
88 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
90 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
91 mb->ol_flags |= PKT_RX_RSS_HASH;
92 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
95 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
96 if (desc->flow_id != 0xFFFFFFFF) {
97 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
98 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
104 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
106 volatile union ice_rx_flex_desc *rxdp)
108 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
109 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
110 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
114 if (desc->flow_id != 0xFFFFFFFF) {
115 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
116 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
119 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
120 stat_err = rte_le_to_cpu_16(desc->status_error0);
121 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
122 mb->ol_flags |= PKT_RX_RSS_HASH;
123 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
129 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
131 volatile union ice_rx_flex_desc *rxdp)
133 volatile struct ice_32b_rx_flex_desc_comms *desc =
134 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
137 stat_err = rte_le_to_cpu_16(desc->status_error0);
138 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
139 mb->ol_flags |= PKT_RX_RSS_HASH;
140 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
143 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
144 if (desc->flow_id != 0xFFFFFFFF) {
145 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
146 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
149 if (rxq->xtr_ol_flag) {
150 uint32_t metadata = 0;
152 stat_err = rte_le_to_cpu_16(desc->status_error1);
154 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
155 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
157 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
159 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
162 mb->ol_flags |= rxq->xtr_ol_flag;
164 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
171 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
173 volatile union ice_rx_flex_desc *rxdp)
175 volatile struct ice_32b_rx_flex_desc_comms *desc =
176 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
179 stat_err = rte_le_to_cpu_16(desc->status_error0);
180 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
181 mb->ol_flags |= PKT_RX_RSS_HASH;
182 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
185 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
186 if (desc->flow_id != 0xFFFFFFFF) {
187 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
188 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
191 if (rxq->xtr_ol_flag) {
192 uint32_t metadata = 0;
194 if (desc->flex_ts.flex.aux0 != 0xFFFF)
195 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
196 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
197 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
200 mb->ol_flags |= rxq->xtr_ol_flag;
202 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
209 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
212 case ICE_RXDID_COMMS_AUX_VLAN:
213 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
214 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
217 case ICE_RXDID_COMMS_AUX_IPV4:
218 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
219 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
222 case ICE_RXDID_COMMS_AUX_IPV6:
223 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
224 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
227 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
228 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
229 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
232 case ICE_RXDID_COMMS_AUX_TCP:
233 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
234 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
237 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
238 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
239 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
242 case ICE_RXDID_COMMS_GENERIC:
243 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
246 case ICE_RXDID_COMMS_OVS:
247 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
251 /* update this according to the RXDID for PROTO_XTR_NONE */
252 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
256 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
257 rxq->xtr_ol_flag = 0;
260 static enum ice_status
261 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
263 struct ice_vsi *vsi = rxq->vsi;
264 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
265 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
266 struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
267 struct ice_rlan_ctx rx_ctx;
270 struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
271 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
273 struct ice_adapter *ad = rxq->vsi->adapter;
275 /* Set buffer size, as header split is disabled. */
276 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
277 RTE_PKTMBUF_HEADROOM);
279 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
280 rxq->max_pkt_len = RTE_MIN((uint32_t)
281 ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
282 dev_data->dev_conf.rxmode.max_rx_pkt_len);
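/* Illustration with hypothetical mempool sizing: a pool created with
 * RTE_MBUF_DEFAULT_BUF_SIZE has a 2176-byte data room; subtracting the
 * 128-byte RTE_PKTMBUF_HEADROOM gives buf_size = 2048, already a multiple
 * of the descriptor buffer granularity, so rx_buf_len stays 2048 and
 * max_pkt_len becomes the smaller of ICE_SUPPORT_CHAIN_NUM such buffers
 * and the configured max_rx_pkt_len.
 */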
284 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
285 if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
286 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
287 PMD_DRV_LOG(ERR, "maximum packet length must "
288 "be larger than %u and smaller than %u,"
289 "as jumbo frame is enabled",
290 (uint32_t)ICE_ETH_MAX_LEN,
291 (uint32_t)ICE_FRAME_SIZE_MAX);
295 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
296 rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
297 PMD_DRV_LOG(ERR, "maximum packet length must be "
298 "larger than %u and smaller than %u, "
299 "as jumbo frame is disabled",
300 (uint32_t)RTE_ETHER_MIN_LEN,
301 (uint32_t)ICE_ETH_MAX_LEN);
306 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
307 /* Register mbuf field and flag for Rx timestamp */
308 err = rte_mbuf_dyn_rx_timestamp_register(
309 &ice_timestamp_dynfield_offset,
310 &ice_timestamp_dynflag);
313 "Cannot register mbuf field/flag for timestamp");
318 memset(&rx_ctx, 0, sizeof(rx_ctx));
320 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
321 rx_ctx.qlen = rxq->nb_rx_desc;
322 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
323 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
324 rx_ctx.dtype = 0; /* No Header Split mode */
325 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
326 rx_ctx.dsize = 1; /* 32B descriptors */
328 rx_ctx.rxmax = rxq->max_pkt_len;
329 /* TPH: Transaction Layer Packet (TLP) processing hints */
330 rx_ctx.tphrdesc_ena = 1;
331 rx_ctx.tphwdesc_ena = 1;
332 rx_ctx.tphdata_ena = 1;
333 rx_ctx.tphhead_ena = 1;
334 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
335 * When the number of free descriptors goes below the lrxqthresh,
336 * an immediate interrupt is triggered.
338 rx_ctx.lrxqthresh = 2;
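/* Since the field is in units of 64 descriptors, the value 2 above means
 * an interrupt fires once fewer than roughly 128 descriptors remain free
 * (illustrative reading of the threshold).
 */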
339 /* Default: use 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
342 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
344 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
346 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
347 rxq->port_id, rxq->queue_id, rxdid);
349 if (!(pf->supported_rxdid & BIT(rxdid))) {
350 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
355 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
357 /* Enable Flexible Descriptors in the queue context which
358 * allows this driver to select a specific receive descriptor format
360 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
361 QRXFLXP_CNTXT_RXDID_IDX_M;
363 /* increasing context priority to pick up profile ID;
364 * default is 0x01; setting to 0x03 to ensure profile
365 * is programmed even if the previous context has the same priority
367 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
368 QRXFLXP_CNTXT_RXDID_PRIO_M;
370 if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
371 regval |= QRXFLXP_CNTXT_TS_M;
373 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
375 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
377 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
381 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
383 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
388 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
389 RTE_PKTMBUF_HEADROOM);
391 /* Check if scattered RX needs to be used. */
392 if (rxq->max_pkt_len > buf_size)
393 dev_data->scattered_rx = 1;
395 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
397 /* Init the Rx tail register */
398 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
403 /* Allocate mbufs for all descriptors in rx queue */
405 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
407 struct ice_rx_entry *rxe = rxq->sw_ring;
411 for (i = 0; i < rxq->nb_rx_desc; i++) {
412 volatile union ice_rx_flex_desc *rxd;
413 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
415 if (unlikely(!mbuf)) {
416 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
420 rte_mbuf_refcnt_set(mbuf, 1);
422 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
424 mbuf->port = rxq->port_id;
427 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
429 rxd = &rxq->rx_ring[i];
430 rxd->read.pkt_addr = dma_addr;
431 rxd->read.hdr_addr = 0;
432 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
442 /* Free all mbufs for descriptors in rx queue */
444 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
448 if (!rxq || !rxq->sw_ring) {
449 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
453 for (i = 0; i < rxq->nb_rx_desc; i++) {
454 if (rxq->sw_ring[i].mbuf) {
455 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
456 rxq->sw_ring[i].mbuf = NULL;
459 if (rxq->rx_nb_avail == 0)
461 for (i = 0; i < rxq->rx_nb_avail; i++)
462 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
464 rxq->rx_nb_avail = 0;
467 /* Turn on or off an Rx queue
468 * @q_idx: queue index in pf scope
469 * @on: turn on or off the queue
472 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
477 /* QRX_CTRL = QRX_ENA */
478 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
481 if (reg & QRX_CTRL_QENA_STAT_M)
482 return 0; /* Already on, skip */
483 reg |= QRX_CTRL_QENA_REQ_M;
485 if (!(reg & QRX_CTRL_QENA_STAT_M))
486 return 0; /* Already off, skip */
487 reg &= ~QRX_CTRL_QENA_REQ_M;
490 /* Write the register */
491 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
492 /* Check the result. QENA_STAT is expected to follow
493 * QENA_REQ within no more than 10 us.
494 * TODO: revisit the wait counter later
496 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
497 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
498 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
500 if ((reg & QRX_CTRL_QENA_REQ_M) &&
501 (reg & QRX_CTRL_QENA_STAT_M))
504 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
505 !(reg & QRX_CTRL_QENA_STAT_M))
510 /* Check if it is timeout */
511 if (j >= ICE_CHK_Q_ENA_COUNT) {
512 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
513 (on ? "enable" : "disable"), q_idx);
521 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
525 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
526 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
527 "rxq->rx_free_thresh=%d, "
528 "ICE_RX_MAX_BURST=%d",
529 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
531 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
532 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
533 "rxq->rx_free_thresh=%d, "
534 "rxq->nb_rx_desc=%d",
535 rxq->rx_free_thresh, rxq->nb_rx_desc);
537 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
538 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
539 "rxq->nb_rx_desc=%d, "
540 "rxq->rx_free_thresh=%d",
541 rxq->nb_rx_desc, rxq->rx_free_thresh);
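/* For illustration (hypothetical configuration, assuming ICE_RX_MAX_BURST
 * is 32): nb_rx_desc = 1024 with rx_free_thresh = 64 passes all three
 * checks above: 64 >= 32, 64 < 1024 and 1024 % 64 == 0, so the bulk-alloc
 * receive path can be used.
 */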
548 /* reset fields in ice_rx_queue back to default */
550 ice_reset_rx_queue(struct ice_rx_queue *rxq)
556 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
560 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
562 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
563 ((volatile char *)rxq->rx_ring)[i] = 0;
565 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
566 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
567 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
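/* The extra ICE_RX_MAX_BURST sw_ring entries initialized above point at a
 * dummy mbuf so the look-ahead scan in the bulk receive path can read past
 * the ring end without dereferencing NULL (assumed rationale, following the
 * usual Intel PMD bulk-alloc layout).
 */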
569 rxq->rx_nb_avail = 0;
570 rxq->rx_next_avail = 0;
571 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
575 rxq->pkt_first_seg = NULL;
576 rxq->pkt_last_seg = NULL;
578 rxq->rxrearm_start = 0;
583 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
585 struct ice_rx_queue *rxq;
587 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
589 PMD_INIT_FUNC_TRACE();
591 if (rx_queue_id >= dev->data->nb_rx_queues) {
592 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
593 rx_queue_id, dev->data->nb_rx_queues);
597 rxq = dev->data->rx_queues[rx_queue_id];
598 if (!rxq || !rxq->q_set) {
599 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
604 err = ice_program_hw_rx_queue(rxq);
606 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
611 err = ice_alloc_rx_queue_mbufs(rxq);
613 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
617 /* Init the RX tail register. */
618 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
620 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
622 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
625 rxq->rx_rel_mbufs(rxq);
626 ice_reset_rx_queue(rxq);
630 dev->data->rx_queue_state[rx_queue_id] =
631 RTE_ETH_QUEUE_STATE_STARTED;
637 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
639 struct ice_rx_queue *rxq;
641 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
643 if (rx_queue_id < dev->data->nb_rx_queues) {
644 rxq = dev->data->rx_queues[rx_queue_id];
646 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
648 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
652 rxq->rx_rel_mbufs(rxq);
653 ice_reset_rx_queue(rxq);
654 dev->data->rx_queue_state[rx_queue_id] =
655 RTE_ETH_QUEUE_STATE_STOPPED;
662 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
664 struct ice_tx_queue *txq;
668 struct ice_aqc_add_tx_qgrp *txq_elem;
669 struct ice_tlan_ctx tx_ctx;
672 PMD_INIT_FUNC_TRACE();
674 if (tx_queue_id >= dev->data->nb_tx_queues) {
675 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
676 tx_queue_id, dev->data->nb_tx_queues);
680 txq = dev->data->tx_queues[tx_queue_id];
681 if (!txq || !txq->q_set) {
682 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
687 buf_len = ice_struct_size(txq_elem, txqs, 1);
688 txq_elem = ice_malloc(hw, buf_len);
693 hw = ICE_VSI_TO_HW(vsi);
695 memset(&tx_ctx, 0, sizeof(tx_ctx));
696 txq_elem->num_txqs = 1;
697 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
699 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
700 tx_ctx.qlen = txq->nb_tx_desc;
701 tx_ctx.pf_num = hw->pf_id;
702 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
703 tx_ctx.src_vsi = vsi->vsi_id;
704 tx_ctx.port_num = hw->port_info->lport;
705 tx_ctx.tso_ena = 1; /* tso enable */
706 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
707 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
710 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
713 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
715 /* Init the Tx tail register */
716 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
718 /* FIXME: we assume TC is always 0 here */
719 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
720 txq_elem, buf_len, NULL);
722 PMD_DRV_LOG(ERR, "Failed to add lan txq");
726 /* store the schedule node id */
727 txq->q_teid = txq_elem->txqs[0].q_teid;
729 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
735 static enum ice_status
736 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
738 struct ice_vsi *vsi = rxq->vsi;
739 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
740 uint32_t rxdid = ICE_RXDID_LEGACY_1;
741 struct ice_rlan_ctx rx_ctx;
746 rxq->rx_buf_len = 1024;
748 memset(&rx_ctx, 0, sizeof(rx_ctx));
750 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
751 rx_ctx.qlen = rxq->nb_rx_desc;
752 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
753 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
754 rx_ctx.dtype = 0; /* No Header Split mode */
755 rx_ctx.dsize = 1; /* 32B descriptors */
756 rx_ctx.rxmax = ICE_ETH_MAX_LEN;
757 /* TPH: Transaction Layer Packet (TLP) processing hints */
758 rx_ctx.tphrdesc_ena = 1;
759 rx_ctx.tphwdesc_ena = 1;
760 rx_ctx.tphdata_ena = 1;
761 rx_ctx.tphhead_ena = 1;
762 /* Low Receive Queue Threshold defined in 64 descriptors units.
763 * When the number of free descriptors goes below the lrxqthresh,
764 * an immediate interrupt is triggered.
766 rx_ctx.lrxqthresh = 2;
767 /* Default: use 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
770 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
772 /* Enable Flexible Descriptors in the queue context which
773 * allows this driver to select a specific receive descriptor format
775 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
776 QRXFLXP_CNTXT_RXDID_IDX_M;
778 /* increasing context priority to pick up profile ID;
779 * default is 0x01; setting to 0x03 to ensure profile
780 * is programmed even if the previous context has the same priority
782 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
783 QRXFLXP_CNTXT_RXDID_PRIO_M;
785 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
787 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
789 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
793 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
795 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
800 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
802 /* Init the Rx tail register */
803 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
809 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
811 struct ice_rx_queue *rxq;
813 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
814 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
816 PMD_INIT_FUNC_TRACE();
819 if (!rxq || !rxq->q_set) {
820 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
825 err = ice_fdir_program_hw_rx_queue(rxq);
827 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
832 /* Init the RX tail register. */
833 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
835 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
837 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
840 ice_reset_rx_queue(rxq);
848 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
850 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
851 struct ice_tx_queue *txq;
855 struct ice_aqc_add_tx_qgrp *txq_elem;
856 struct ice_tlan_ctx tx_ctx;
859 PMD_INIT_FUNC_TRACE();
862 if (!txq || !txq->q_set) {
863 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
868 buf_len = ice_struct_size(txq_elem, txqs, 1);
869 txq_elem = ice_malloc(hw, buf_len);
874 hw = ICE_VSI_TO_HW(vsi);
876 memset(&tx_ctx, 0, sizeof(tx_ctx));
877 txq_elem->num_txqs = 1;
878 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
880 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
881 tx_ctx.qlen = txq->nb_tx_desc;
882 tx_ctx.pf_num = hw->pf_id;
883 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
884 tx_ctx.src_vsi = vsi->vsi_id;
885 tx_ctx.port_num = hw->port_info->lport;
886 tx_ctx.tso_ena = 1; /* tso enable */
887 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
888 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
890 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
893 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
895 /* Init the Tx tail register */
896 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
898 /* FIXME: we assume TC is always 0 here */
899 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
900 txq_elem, buf_len, NULL);
902 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
906 /* store the schedule node id */
907 txq->q_teid = txq_elem->txqs[0].q_teid;
913 /* Free all mbufs for descriptors in tx queue */
915 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
919 if (!txq || !txq->sw_ring) {
920 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
924 for (i = 0; i < txq->nb_tx_desc; i++) {
925 if (txq->sw_ring[i].mbuf) {
926 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
927 txq->sw_ring[i].mbuf = NULL;
933 ice_reset_tx_queue(struct ice_tx_queue *txq)
935 struct ice_tx_entry *txe;
936 uint16_t i, prev, size;
939 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
944 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
945 for (i = 0; i < size; i++)
946 ((volatile char *)txq->tx_ring)[i] = 0;
948 prev = (uint16_t)(txq->nb_tx_desc - 1);
949 for (i = 0; i < txq->nb_tx_desc; i++) {
950 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
952 txd->cmd_type_offset_bsz =
953 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
956 txe[prev].next_id = i;
960 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
961 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
966 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
967 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
971 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
973 struct ice_tx_queue *txq;
974 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
975 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
976 struct ice_vsi *vsi = pf->main_vsi;
977 enum ice_status status;
980 uint16_t q_handle = tx_queue_id;
982 if (tx_queue_id >= dev->data->nb_tx_queues) {
983 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
984 tx_queue_id, dev->data->nb_tx_queues);
988 txq = dev->data->tx_queues[tx_queue_id];
990 PMD_DRV_LOG(ERR, "TX queue %u is not available",
995 q_ids[0] = txq->reg_idx;
996 q_teids[0] = txq->q_teid;
998 /* FIXME: we assume TC is always 0 here */
999 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1000 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1001 if (status != ICE_SUCCESS) {
1002 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1006 txq->tx_rel_mbufs(txq);
1007 ice_reset_tx_queue(txq);
1008 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1014 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1016 struct ice_rx_queue *rxq;
1018 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1019 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1023 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1025 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1029 rxq->rx_rel_mbufs(rxq);
1035 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1037 struct ice_tx_queue *txq;
1038 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1039 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1040 struct ice_vsi *vsi = pf->main_vsi;
1041 enum ice_status status;
1043 uint32_t q_teids[1];
1044 uint16_t q_handle = tx_queue_id;
1048 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1054 q_ids[0] = txq->reg_idx;
1055 q_teids[0] = txq->q_teid;
1057 /* FIXME: we assume TC is always 0 here */
1058 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1059 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1060 if (status != ICE_SUCCESS) {
1061 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1065 txq->tx_rel_mbufs(txq);
1071 ice_rx_queue_setup(struct rte_eth_dev *dev,
1074 unsigned int socket_id,
1075 const struct rte_eth_rxconf *rx_conf,
1076 struct rte_mempool *mp)
1078 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1079 struct ice_adapter *ad =
1080 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1081 struct ice_vsi *vsi = pf->main_vsi;
1082 struct ice_rx_queue *rxq;
1083 const struct rte_memzone *rz;
1086 int use_def_burst_func = 1;
1089 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1090 nb_desc > ICE_MAX_RING_DESC ||
1091 nb_desc < ICE_MIN_RING_DESC) {
1092 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1093 "invalid", nb_desc);
1097 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1099 /* Free memory if needed */
1100 if (dev->data->rx_queues[queue_idx]) {
1101 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1102 dev->data->rx_queues[queue_idx] = NULL;
1105 /* Allocate the rx queue data structure */
1106 rxq = rte_zmalloc_socket(NULL,
1107 sizeof(struct ice_rx_queue),
1108 RTE_CACHE_LINE_SIZE,
1111 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1112 "rx queue data structure");
1116 rxq->nb_rx_desc = nb_desc;
1117 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1118 rxq->queue_id = queue_idx;
1119 rxq->offloads = offloads;
1121 rxq->reg_idx = vsi->base_queue + queue_idx;
1122 rxq->port_id = dev->data->port_id;
1123 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1124 rxq->crc_len = RTE_ETHER_CRC_LEN;
1128 rxq->drop_en = rx_conf->rx_drop_en;
1130 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1131 rxq->proto_xtr = pf->proto_xtr != NULL ?
1132 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1134 /* Allocate the maximum number of RX ring hardware descriptors. */
1135 len = ICE_MAX_RING_DESC;
1138 * Allocating a little more memory because vectorized/bulk_alloc Rx
1139 * functions don't check boundaries each time.
1141 len += ICE_RX_MAX_BURST;
1143 /* Size the descriptor ring for the maximum ring size plus the extra burst entries. */
1144 ring_size = sizeof(union ice_rx_flex_desc) * len;
1145 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1146 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1147 ring_size, ICE_RING_BASE_ALIGN,
1150 ice_rx_queue_release(rxq);
1151 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1156 /* Zero all the descriptors in the ring. */
1157 memset(rz->addr, 0, ring_size);
1159 rxq->rx_ring_dma = rz->iova;
1160 rxq->rx_ring = rz->addr;
1162 /* always reserve more for bulk alloc */
1163 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1165 /* Allocate the software ring. */
1166 rxq->sw_ring = rte_zmalloc_socket(NULL,
1167 sizeof(struct ice_rx_entry) * len,
1168 RTE_CACHE_LINE_SIZE,
1170 if (!rxq->sw_ring) {
1171 ice_rx_queue_release(rxq);
1172 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1176 ice_reset_rx_queue(rxq);
1178 dev->data->rx_queues[queue_idx] = rxq;
1179 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1181 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1183 if (!use_def_burst_func) {
1184 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1185 "satisfied. Rx Burst Bulk Alloc function will be "
1186 "used on port=%d, queue=%d.",
1187 rxq->port_id, rxq->queue_id);
1189 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1190 "not satisfied, Scattered Rx is requested. "
1191 "on port=%d, queue=%d.",
1192 rxq->port_id, rxq->queue_id);
1193 ad->rx_bulk_alloc_allowed = false;
1200 ice_rx_queue_release(void *rxq)
1202 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1205 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1210 rte_free(q->sw_ring);
1211 rte_memzone_free(q->mz);
1216 ice_tx_queue_setup(struct rte_eth_dev *dev,
1219 unsigned int socket_id,
1220 const struct rte_eth_txconf *tx_conf)
1222 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1223 struct ice_vsi *vsi = pf->main_vsi;
1224 struct ice_tx_queue *txq;
1225 const struct rte_memzone *tz;
1227 uint16_t tx_rs_thresh, tx_free_thresh;
1230 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1232 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1233 nb_desc > ICE_MAX_RING_DESC ||
1234 nb_desc < ICE_MIN_RING_DESC) {
1235 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1236 "invalid", nb_desc);
1241 * The following two parameters control the setting of the RS bit on
1242 * transmit descriptors. TX descriptors will have their RS bit set
1243 * after txq->tx_rs_thresh descriptors have been used. The TX
1244 * descriptor ring will be cleaned after txq->tx_free_thresh
1245 * descriptors are used or if the number of descriptors required to
1246 * transmit a packet is greater than the number of free TX descriptors.
1248 * The following constraints must be satisfied:
1249 * - tx_rs_thresh must be greater than 0.
1250 * - tx_rs_thresh must be less than the size of the ring minus 2.
1251 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1252 * - tx_rs_thresh must be a divisor of the ring size.
1253 * - tx_free_thresh must be greater than 0.
1254 * - tx_free_thresh must be less than the size of the ring minus 3.
1255 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1257 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1258 * race condition, hence the maximum threshold constraints. When set
1259 * to zero use default values.
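 *
 * For illustration (hypothetical values): nb_desc = 1024 with
 * tx_free_thresh = 64 and tx_rs_thresh = 32 satisfies every constraint
 * above: 32 > 0, 32 < 1022, 32 <= 64, 1024 % 32 == 0, 64 < 1021 and
 * 32 + 64 <= 1024.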
1261 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1262 tx_conf->tx_free_thresh :
1263 ICE_DEFAULT_TX_FREE_THRESH);
1264 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1266 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1267 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1268 if (tx_conf->tx_rs_thresh)
1269 tx_rs_thresh = tx_conf->tx_rs_thresh;
1270 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1271 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1272 "exceed nb_desc. (tx_rs_thresh=%u "
1273 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1274 (unsigned int)tx_rs_thresh,
1275 (unsigned int)tx_free_thresh,
1276 (unsigned int)nb_desc,
1277 (int)dev->data->port_id,
1281 if (tx_rs_thresh >= (nb_desc - 2)) {
1282 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1283 "number of TX descriptors minus 2. "
1284 "(tx_rs_thresh=%u port=%d queue=%d)",
1285 (unsigned int)tx_rs_thresh,
1286 (int)dev->data->port_id,
1290 if (tx_free_thresh >= (nb_desc - 3)) {
1291 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1292 "tx_free_thresh must be less than the "
1293 "number of TX descriptors minus 3. "
1294 "(tx_free_thresh=%u port=%d queue=%d)",
1295 (unsigned int)tx_free_thresh,
1296 (int)dev->data->port_id,
1300 if (tx_rs_thresh > tx_free_thresh) {
1301 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1302 "equal to tx_free_thresh. (tx_free_thresh=%u"
1303 " tx_rs_thresh=%u port=%d queue=%d)",
1304 (unsigned int)tx_free_thresh,
1305 (unsigned int)tx_rs_thresh,
1306 (int)dev->data->port_id,
1310 if ((nb_desc % tx_rs_thresh) != 0) {
1311 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1312 "number of TX descriptors. (tx_rs_thresh=%u"
1313 " port=%d queue=%d)",
1314 (unsigned int)tx_rs_thresh,
1315 (int)dev->data->port_id,
1319 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1320 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1321 "tx_rs_thresh is greater than 1. "
1322 "(tx_rs_thresh=%u port=%d queue=%d)",
1323 (unsigned int)tx_rs_thresh,
1324 (int)dev->data->port_id,
1329 /* Free memory if needed. */
1330 if (dev->data->tx_queues[queue_idx]) {
1331 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1332 dev->data->tx_queues[queue_idx] = NULL;
1335 /* Allocate the TX queue data structure. */
1336 txq = rte_zmalloc_socket(NULL,
1337 sizeof(struct ice_tx_queue),
1338 RTE_CACHE_LINE_SIZE,
1341 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1342 "tx queue structure");
1346 /* Allocate TX hardware ring descriptors. */
1347 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1348 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1349 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1350 ring_size, ICE_RING_BASE_ALIGN,
1353 ice_tx_queue_release(txq);
1354 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1359 txq->nb_tx_desc = nb_desc;
1360 txq->tx_rs_thresh = tx_rs_thresh;
1361 txq->tx_free_thresh = tx_free_thresh;
1362 txq->pthresh = tx_conf->tx_thresh.pthresh;
1363 txq->hthresh = tx_conf->tx_thresh.hthresh;
1364 txq->wthresh = tx_conf->tx_thresh.wthresh;
1365 txq->queue_id = queue_idx;
1367 txq->reg_idx = vsi->base_queue + queue_idx;
1368 txq->port_id = dev->data->port_id;
1369 txq->offloads = offloads;
1371 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1373 txq->tx_ring_dma = tz->iova;
1374 txq->tx_ring = tz->addr;
1376 /* Allocate software ring */
1378 rte_zmalloc_socket(NULL,
1379 sizeof(struct ice_tx_entry) * nb_desc,
1380 RTE_CACHE_LINE_SIZE,
1382 if (!txq->sw_ring) {
1383 ice_tx_queue_release(txq);
1384 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1388 ice_reset_tx_queue(txq);
1390 dev->data->tx_queues[queue_idx] = txq;
1391 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1392 ice_set_tx_function_flag(dev, txq);
1398 ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1400 ice_rx_queue_release(dev->data->rx_queues[qid]);
1404 ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1406 ice_tx_queue_release(dev->data->tx_queues[qid]);
1410 ice_tx_queue_release(void *txq)
1412 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1415 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1420 rte_free(q->sw_ring);
1421 rte_memzone_free(q->mz);
1426 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1427 struct rte_eth_rxq_info *qinfo)
1429 struct ice_rx_queue *rxq;
1431 rxq = dev->data->rx_queues[queue_id];
1433 qinfo->mp = rxq->mp;
1434 qinfo->scattered_rx = dev->data->scattered_rx;
1435 qinfo->nb_desc = rxq->nb_rx_desc;
1437 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1438 qinfo->conf.rx_drop_en = rxq->drop_en;
1439 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1443 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1444 struct rte_eth_txq_info *qinfo)
1446 struct ice_tx_queue *txq;
1448 txq = dev->data->tx_queues[queue_id];
1450 qinfo->nb_desc = txq->nb_tx_desc;
1452 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1453 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1454 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1456 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1457 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1458 qinfo->conf.offloads = txq->offloads;
1459 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1463 ice_rx_queue_count(void *rx_queue)
1465 #define ICE_RXQ_SCAN_INTERVAL 4
1466 volatile union ice_rx_flex_desc *rxdp;
1467 struct ice_rx_queue *rxq;
1471 rxdp = &rxq->rx_ring[rxq->rx_tail];
1472 while ((desc < rxq->nb_rx_desc) &&
1473 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1474 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1476 * Check the DD bit of one Rx descriptor in each group of 4,
1477 * to avoid checking too frequently and degrading performance
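 * (as a consequence, the returned count is only an approximation that
 * advances in steps of ICE_RXQ_SCAN_INTERVAL descriptors).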
1480 desc += ICE_RXQ_SCAN_INTERVAL;
1481 rxdp += ICE_RXQ_SCAN_INTERVAL;
1482 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1483 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1484 desc - rxq->nb_rx_desc]);
1490 #define ICE_RX_FLEX_ERR0_BITS \
1491 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1492 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1493 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1494 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1495 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1496 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
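/* ICE_RX_FLEX_ERR0_BITS above ORs together the IP/L4/outer checksum error
 * bits plus the HBO and RXE error bits, so ice_rxd_error_to_pkt_flags()
 * below can fast-path the common no-error case with a single AND.
 */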
1498 /* Rx L3/L4 checksum */
1499 static inline uint64_t
1500 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1504 /* check if HW has decoded the packet and checksum */
1505 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1508 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1509 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1513 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1514 flags |= PKT_RX_IP_CKSUM_BAD;
1516 flags |= PKT_RX_IP_CKSUM_GOOD;
1518 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1519 flags |= PKT_RX_L4_CKSUM_BAD;
1521 flags |= PKT_RX_L4_CKSUM_GOOD;
1523 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1524 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1526 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1527 flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1529 flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1535 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1537 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1538 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1539 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1541 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1542 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1543 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1548 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1549 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1550 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1551 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1552 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1553 mb->vlan_tci_outer = mb->vlan_tci;
1554 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1555 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1556 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1557 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1559 mb->vlan_tci_outer = 0;
1562 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1563 mb->vlan_tci, mb->vlan_tci_outer);
1566 #define ICE_LOOK_AHEAD 8
1567 #if (ICE_LOOK_AHEAD != 8)
1568 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1571 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1573 volatile union ice_rx_flex_desc *rxdp;
1574 struct ice_rx_entry *rxep;
1575 struct rte_mbuf *mb;
1578 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1579 int32_t i, j, nb_rx = 0;
1580 uint64_t pkt_flags = 0;
1581 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1582 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1583 struct ice_vsi *vsi = rxq->vsi;
1584 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1586 struct ice_adapter *ad = rxq->vsi->adapter;
1588 rxdp = &rxq->rx_ring[rxq->rx_tail];
1589 rxep = &rxq->sw_ring[rxq->rx_tail];
1591 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1593 /* Make sure there is at least 1 packet to receive */
1594 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1598 * Scan LOOK_AHEAD descriptors at a time to determine which
1599 * descriptors reference packets that are ready to be received.
1601 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1602 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1603 /* Read desc statuses backwards to avoid race condition */
1604 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1605 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1609 /* Compute how many status bits were set */
1610 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1611 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
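/* DD is bit 0 of status_error0, so each masked term above is 0 or 1 and
 * nb_dd counts how many of the ICE_LOOK_AHEAD descriptors are complete;
 * assuming hardware writes descriptors back in order, these are the first
 * nb_dd entries of the group.
 */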
1615 /* Translate descriptor info to mbuf parameters */
1616 for (j = 0; j < nb_dd; j++) {
1618 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1619 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1620 mb->data_len = pkt_len;
1621 mb->pkt_len = pkt_len;
1623 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1624 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1625 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1626 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1627 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1628 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1629 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1630 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1631 ts_ns = ice_tstamp_convert_32b_64b(hw,
1632 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
1633 if (ice_timestamp_dynflag > 0) {
1634 *RTE_MBUF_DYNFIELD(mb,
1635 ice_timestamp_dynfield_offset,
1636 rte_mbuf_timestamp_t *) = ts_ns;
1637 mb->ol_flags |= ice_timestamp_dynflag;
1641 if (ad->ptp_ena && ((mb->packet_type &
1642 RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1644 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1645 mb->timesync = rxq->queue_id;
1646 pkt_flags |= PKT_RX_IEEE1588_PTP;
1649 mb->ol_flags |= pkt_flags;
1652 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1653 rxq->rx_stage[i + j] = rxep[j].mbuf;
1655 if (nb_dd != ICE_LOOK_AHEAD)
1659 /* Clear software ring entries */
1660 for (i = 0; i < nb_rx; i++)
1661 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1663 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1664 "port_id=%u, queue_id=%u, nb_rx=%d",
1665 rxq->port_id, rxq->queue_id, nb_rx);
1670 static inline uint16_t
1671 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1672 struct rte_mbuf **rx_pkts,
1676 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1678 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1680 for (i = 0; i < nb_pkts; i++)
1681 rx_pkts[i] = stage[i];
1683 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1684 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1690 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1692 volatile union ice_rx_flex_desc *rxdp;
1693 struct ice_rx_entry *rxep;
1694 struct rte_mbuf *mb;
1695 uint16_t alloc_idx, i;
1699 /* Allocate buffers in bulk */
1700 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1701 (rxq->rx_free_thresh - 1));
1702 rxep = &rxq->sw_ring[alloc_idx];
1703 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1704 rxq->rx_free_thresh);
1705 if (unlikely(diag != 0)) {
1706 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1710 rxdp = &rxq->rx_ring[alloc_idx];
1711 for (i = 0; i < rxq->rx_free_thresh; i++) {
1712 if (likely(i < (rxq->rx_free_thresh - 1)))
1713 /* Prefetch next mbuf */
1714 rte_prefetch0(rxep[i + 1].mbuf);
1717 rte_mbuf_refcnt_set(mb, 1);
1719 mb->data_off = RTE_PKTMBUF_HEADROOM;
1721 mb->port = rxq->port_id;
1722 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1723 rxdp[i].read.hdr_addr = 0;
1724 rxdp[i].read.pkt_addr = dma_addr;
1727 /* Update Rx tail register */
1728 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1730 rxq->rx_free_trigger =
1731 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1732 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1733 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
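/* For example (hypothetical sizing): with 1024 descriptors and
 * rx_free_thresh = 32, rx_free_trigger walks 31, 63, ..., 1023 and then
 * wraps back to 31, so buffers are always refilled in rx_free_thresh-sized
 * batches.
 */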
1738 static inline uint16_t
1739 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1741 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1747 if (rxq->rx_nb_avail)
1748 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1750 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1751 rxq->rx_next_avail = 0;
1752 rxq->rx_nb_avail = nb_rx;
1753 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1755 if (rxq->rx_tail > rxq->rx_free_trigger) {
1756 if (ice_rx_alloc_bufs(rxq) != 0) {
1759 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1760 rxq->rx_free_thresh;
1761 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1762 "port_id=%u, queue_id=%u",
1763 rxq->port_id, rxq->queue_id);
1764 rxq->rx_nb_avail = 0;
1765 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1766 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1767 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1773 if (rxq->rx_tail >= rxq->nb_rx_desc)
1776 if (rxq->rx_nb_avail)
1777 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1783 ice_recv_pkts_bulk_alloc(void *rx_queue,
1784 struct rte_mbuf **rx_pkts,
1791 if (unlikely(nb_pkts == 0))
1794 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1795 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1798 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1799 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1800 nb_rx = (uint16_t)(nb_rx + count);
1801 nb_pkts = (uint16_t)(nb_pkts - count);
1810 ice_recv_scattered_pkts(void *rx_queue,
1811 struct rte_mbuf **rx_pkts,
1814 struct ice_rx_queue *rxq = rx_queue;
1815 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1816 volatile union ice_rx_flex_desc *rxdp;
1817 union ice_rx_flex_desc rxd;
1818 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1819 struct ice_rx_entry *rxe;
1820 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1821 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1822 struct rte_mbuf *nmb; /* newly allocated mbuf */
1823 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1824 uint16_t rx_id = rxq->rx_tail;
1826 uint16_t nb_hold = 0;
1827 uint16_t rx_packet_len;
1828 uint16_t rx_stat_err0;
1831 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1832 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1833 struct ice_vsi *vsi = rxq->vsi;
1834 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1836 struct ice_adapter *ad = rxq->vsi->adapter;
1838 while (nb_rx < nb_pkts) {
1839 rxdp = &rx_ring[rx_id];
1840 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1842 /* Check the DD bit first */
1843 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1847 nmb = rte_mbuf_raw_alloc(rxq->mp);
1848 if (unlikely(!nmb)) {
1849 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
1852 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1855 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1857 if (unlikely(rx_id == rxq->nb_rx_desc))
1860 /* Prefetch next mbuf */
1861 rte_prefetch0(sw_ring[rx_id].mbuf);
1864 * When the next RX descriptor is on a cache line boundary,
1865 * prefetch the next 4 RX descriptors and the next 8 pointers
1868 if ((rx_id & 0x3) == 0) {
1869 rte_prefetch0(&rx_ring[rx_id]);
1870 rte_prefetch0(&sw_ring[rx_id]);
1876 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1878 /* Set data buffer address and data length of the mbuf */
1879 rxdp->read.hdr_addr = 0;
1880 rxdp->read.pkt_addr = dma_addr;
1881 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1882 ICE_RX_FLX_DESC_PKT_LEN_M;
1883 rxm->data_len = rx_packet_len;
1884 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1887 * If this is the first buffer of the received packet, set the
1888 * pointer to the first mbuf of the packet and initialize its
1889 * context. Otherwise, update the total length and the number
1890 * of segments of the current scattered packet, and update the
1891 * pointer to the last mbuf of the current packet.
1895 first_seg->nb_segs = 1;
1896 first_seg->pkt_len = rx_packet_len;
1898 first_seg->pkt_len =
1899 (uint16_t)(first_seg->pkt_len +
1901 first_seg->nb_segs++;
1902 last_seg->next = rxm;
1906 * If this is not the last buffer of the received packet,
1907 * update the pointer to the last mbuf of the current scattered
1908 * packet and continue to parse the RX ring.
1910 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1916 * This is the last buffer of the received packet. If the CRC
1917 * is not stripped by the hardware:
1918 * - Subtract the CRC length from the total packet length.
1919 * - If the last buffer only contains the whole CRC or a part
1920 * of it, free the mbuf associated to the last buffer. If part
1921 * of the CRC is also contained in the previous mbuf, subtract
1922 * the length of that CRC part from the data length of the
1926 if (unlikely(rxq->crc_len > 0)) {
1927 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1928 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
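/* e.g. (assuming a 4-byte CRC) if this last buffer holds only 2 bytes,
 * both are CRC: the buffer is freed, nb_segs is decremented and the
 * remaining 2 CRC bytes are trimmed from the previous segment below.
 */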
1929 rte_pktmbuf_free_seg(rxm);
1930 first_seg->nb_segs--;
1931 last_seg->data_len =
1932 (uint16_t)(last_seg->data_len -
1933 (RTE_ETHER_CRC_LEN - rx_packet_len));
1934 last_seg->next = NULL;
1936 rxm->data_len = (uint16_t)(rx_packet_len -
1940 first_seg->port = rxq->port_id;
1941 first_seg->ol_flags = 0;
1942 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1943 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1944 ice_rxd_to_vlan_tci(first_seg, &rxd);
1945 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1946 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1947 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1948 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1949 ts_ns = ice_tstamp_convert_32b_64b(hw,
1950 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1951 if (ice_timestamp_dynflag > 0) {
1952 *RTE_MBUF_DYNFIELD(first_seg,
1953 ice_timestamp_dynfield_offset,
1954 rte_mbuf_timestamp_t *) = ts_ns;
1955 first_seg->ol_flags |= ice_timestamp_dynflag;
1959 if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
1960 == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1962 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
1963 first_seg->timesync = rxq->queue_id;
1964 pkt_flags |= PKT_RX_IEEE1588_PTP;
1967 first_seg->ol_flags |= pkt_flags;
1968 /* Prefetch data of first segment, if configured to do so. */
1969 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1970 first_seg->data_off));
1971 rx_pkts[nb_rx++] = first_seg;
1975 /* Record index of the next RX descriptor to probe. */
1976 rxq->rx_tail = rx_id;
1977 rxq->pkt_first_seg = first_seg;
1978 rxq->pkt_last_seg = last_seg;
1981 * If the number of free RX descriptors is greater than the RX free
1982 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1983 * register. Update the RDT with the value of the last processed RX
1984 * descriptor minus 1, to guarantee that the RDT register is never
1985 * equal to the RDH register, which creates a "full" ring situation
1986 * from the hardware point of view.
1988 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1989 if (nb_hold > rxq->rx_free_thresh) {
1990 rx_id = (uint16_t)(rx_id == 0 ?
1991 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1992 /* write TAIL register */
1993 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
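/* With a typical rx_free_thresh of 32 (illustrative), this MMIO write
 * happens roughly once every 32 received packets, amortizing its cost.
 */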
1996 rxq->nb_rx_hold = nb_hold;
1998 /* return received packet in the burst */
2003 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2005 struct ice_adapter *ad =
2006 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2007 const uint32_t *ptypes;
2009 static const uint32_t ptypes_os[] = {
2010 /* refers to ice_get_default_pkt_type() */
2012 RTE_PTYPE_L2_ETHER_TIMESYNC,
2013 RTE_PTYPE_L2_ETHER_LLDP,
2014 RTE_PTYPE_L2_ETHER_ARP,
2015 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2016 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2019 RTE_PTYPE_L4_NONFRAG,
2023 RTE_PTYPE_TUNNEL_GRENAT,
2024 RTE_PTYPE_TUNNEL_IP,
2025 RTE_PTYPE_INNER_L2_ETHER,
2026 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2027 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2028 RTE_PTYPE_INNER_L4_FRAG,
2029 RTE_PTYPE_INNER_L4_ICMP,
2030 RTE_PTYPE_INNER_L4_NONFRAG,
2031 RTE_PTYPE_INNER_L4_SCTP,
2032 RTE_PTYPE_INNER_L4_TCP,
2033 RTE_PTYPE_INNER_L4_UDP,
2037 static const uint32_t ptypes_comms[] = {
2038 /* refers to ice_get_default_pkt_type() */
2040 RTE_PTYPE_L2_ETHER_TIMESYNC,
2041 RTE_PTYPE_L2_ETHER_LLDP,
2042 RTE_PTYPE_L2_ETHER_ARP,
2043 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2044 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2047 RTE_PTYPE_L4_NONFRAG,
2051 RTE_PTYPE_TUNNEL_GRENAT,
2052 RTE_PTYPE_TUNNEL_IP,
2053 RTE_PTYPE_INNER_L2_ETHER,
2054 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2055 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2056 RTE_PTYPE_INNER_L4_FRAG,
2057 RTE_PTYPE_INNER_L4_ICMP,
2058 RTE_PTYPE_INNER_L4_NONFRAG,
2059 RTE_PTYPE_INNER_L4_SCTP,
2060 RTE_PTYPE_INNER_L4_TCP,
2061 RTE_PTYPE_INNER_L4_UDP,
2062 RTE_PTYPE_TUNNEL_GTPC,
2063 RTE_PTYPE_TUNNEL_GTPU,
2064 RTE_PTYPE_L2_ETHER_PPPOE,
2068 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
2069 ptypes = ptypes_comms;
2073 if (dev->rx_pkt_burst == ice_recv_pkts ||
2074 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2075 dev->rx_pkt_burst == ice_recv_scattered_pkts)
2079 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2080 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2081 #ifdef CC_AVX512_SUPPORT
2082 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2083 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2084 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2085 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2087 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2088 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2089 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2090 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2098 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2100 volatile union ice_rx_flex_desc *rxdp;
2101 struct ice_rx_queue *rxq = rx_queue;
2104 if (unlikely(offset >= rxq->nb_rx_desc))
2107 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2108 return RTE_ETH_RX_DESC_UNAVAIL;
2110 desc = rxq->rx_tail + offset;
2111 if (desc >= rxq->nb_rx_desc)
2112 desc -= rxq->nb_rx_desc;
2114 rxdp = &rxq->rx_ring[desc];
2115 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2116 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2117 return RTE_ETH_RX_DESC_DONE;
2119 return RTE_ETH_RX_DESC_AVAIL;
2123 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2125 struct ice_tx_queue *txq = tx_queue;
2126 volatile uint64_t *status;
2127 uint64_t mask, expect;
2130 if (unlikely(offset >= txq->nb_tx_desc))
2133 desc = txq->tx_tail + offset;
2134 /* go to next desc that has the RS bit */
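/* e.g. with tx_rs_thresh = 32 (hypothetical) and desc = 40, the rounding
 * below yields 64, the next descriptor index on an RS-threshold boundary.
 */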
2135 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2137 if (desc >= txq->nb_tx_desc) {
2138 desc -= txq->nb_tx_desc;
2139 if (desc >= txq->nb_tx_desc)
2140 desc -= txq->nb_tx_desc;
2143 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2144 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2145 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2146 ICE_TXD_QW1_DTYPE_S);
2147 if ((*status & mask) == expect)
2148 return RTE_ETH_TX_DESC_DONE;
2150 return RTE_ETH_TX_DESC_FULL;
2154 ice_free_queues(struct rte_eth_dev *dev)
2158 PMD_INIT_FUNC_TRACE();
2160 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2161 if (!dev->data->rx_queues[i])
2163 ice_rx_queue_release(dev->data->rx_queues[i]);
2164 dev->data->rx_queues[i] = NULL;
2166 dev->data->nb_rx_queues = 0;
2168 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2169 if (!dev->data->tx_queues[i])
2171 ice_tx_queue_release(dev->data->tx_queues[i]);
2172 dev->data->tx_queues[i] = NULL;
2174 dev->data->nb_tx_queues = 0;
2177 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2178 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
2181 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2183 struct ice_tx_queue *txq;
2184 const struct rte_memzone *tz = NULL;
2186 struct rte_eth_dev *dev;
2189 PMD_DRV_LOG(ERR, "PF is not available");
2193 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2195 /* Allocate the TX queue data structure. */
2196 txq = rte_zmalloc_socket("ice fdir tx queue",
2197 sizeof(struct ice_tx_queue),
2198 RTE_CACHE_LINE_SIZE,
2201 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2202 "tx queue structure.");
2206 /* Allocate TX hardware ring descriptors. */
2207 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2208 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2210 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2211 ICE_FDIR_QUEUE_ID, ring_size,
2212 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2214 ice_tx_queue_release(txq);
2215 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2220 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2221 txq->queue_id = ICE_FDIR_QUEUE_ID;
2222 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2223 txq->vsi = pf->fdir.fdir_vsi;
2225 txq->tx_ring_dma = tz->iova;
2226 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2228 * No need to allocate a software ring or reset for the FDIR
2229 * program queue; just mark the queue as configured.
2234 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2240 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2242 struct ice_rx_queue *rxq;
2243 const struct rte_memzone *rz = NULL;
2245 struct rte_eth_dev *dev;
2248 PMD_DRV_LOG(ERR, "PF is not available");
2252 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2254 /* Allocate the RX queue data structure. */
2255 rxq = rte_zmalloc_socket("ice fdir rx queue",
2256 sizeof(struct ice_rx_queue),
2257 RTE_CACHE_LINE_SIZE,
2260 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2261 "rx queue structure.");
2265 /* Allocate RX hardware ring descriptors. */
2266 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2267 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2269 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2270 ICE_FDIR_QUEUE_ID, ring_size,
2271 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2273 ice_rx_queue_release(rxq);
2274 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2279 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2280 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2281 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2282 rxq->vsi = pf->fdir.fdir_vsi;
2284 rxq->rx_ring_dma = rz->iova;
2285 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2286 sizeof(union ice_32byte_rx_desc));
2287 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2290 * There is no need to allocate a software ring or to reset the FDIR
2291 * Rx queue; just mark the queue as configured.
2296 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2302 ice_recv_pkts(void *rx_queue,
2303 struct rte_mbuf **rx_pkts,
2306 struct ice_rx_queue *rxq = rx_queue;
2307 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2308 volatile union ice_rx_flex_desc *rxdp;
2309 union ice_rx_flex_desc rxd;
2310 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2311 struct ice_rx_entry *rxe;
2312 struct rte_mbuf *nmb; /* new allocated mbuf */
2313 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2314 uint16_t rx_id = rxq->rx_tail;
2316 uint16_t nb_hold = 0;
2317 uint16_t rx_packet_len;
2318 uint16_t rx_stat_err0;
2321 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2322 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2323 struct ice_vsi *vsi = rxq->vsi;
2324 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2326 struct ice_adapter *ad = rxq->vsi->adapter;
2328 while (nb_rx < nb_pkts) {
2329 rxdp = &rx_ring[rx_id];
2330 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2332 /* Check the DD bit first */
2333 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2337 nmb = rte_mbuf_raw_alloc(rxq->mp);
2338 if (unlikely(!nmb)) {
2339 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2342 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2345 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2347 if (unlikely(rx_id == rxq->nb_rx_desc))
2352 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2355 * fill the read format of the descriptor with the physical address
2356 * of the newly allocated mbuf: nmb
2358 rxdp->read.hdr_addr = 0;
2359 rxdp->read.pkt_addr = dma_addr;
2361 /* calculate rx_packet_len of the received pkt */
2362 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2363 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2365 /* fill old mbuf with received descriptor: rxd */
2366 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2367 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2370 rxm->pkt_len = rx_packet_len;
2371 rxm->data_len = rx_packet_len;
2372 rxm->port = rxq->port_id;
2373 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2374 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2375 ice_rxd_to_vlan_tci(rxm, &rxd);
2376 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2377 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2378 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2379 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
2380 ts_ns = ice_tstamp_convert_32b_64b(hw,
2381 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
2382 if (ice_timestamp_dynflag > 0) {
2383 *RTE_MBUF_DYNFIELD(rxm,
2384 ice_timestamp_dynfield_offset,
2385 rte_mbuf_timestamp_t *) = ts_ns;
2386 rxm->ol_flags |= ice_timestamp_dynflag;
2390 if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
2391 RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2393 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2394 rxm->timesync = rxq->queue_id;
2395 pkt_flags |= PKT_RX_IEEE1588_PTP;
2398 rxm->ol_flags |= pkt_flags;
2399 /* copy old mbuf to rx_pkts */
2400 rx_pkts[nb_rx++] = rxm;
2402 rxq->rx_tail = rx_id;
2404 * If the number of free RX descriptors is greater than the RX free
2405 * threshold of the queue, advance the receive tail register of queue.
2406 * Update that register with the value of the last processed RX
2407 * descriptor minus 1.
2409 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2410 if (nb_hold > rxq->rx_free_thresh) {
2411 rx_id = (uint16_t)(rx_id == 0 ?
2412 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2413 /* write TAIL register */
2414 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2417 rxq->nb_rx_hold = nb_hold;
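/*
 * Illustrative note (values are hypothetical): with rx_free_thresh = 32
 * the tail register is written roughly once per 32 processed descriptors
 * rather than once per packet, reducing MMIO traffic on the hot path.
 */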
2419 /* return the number of received packets in this burst */
2424 ice_parse_tunneling_params(uint64_t ol_flags,
2425 union ice_tx_offload tx_offload,
2426 uint32_t *cd_tunneling)
2428 /* EIPT: External (outer) IP header type */
2429 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2430 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2431 else if (ol_flags & PKT_TX_OUTER_IPV4)
2432 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2433 else if (ol_flags & PKT_TX_OUTER_IPV6)
2434 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2436 /* EIPLEN: External (outer) IP header length, in DWords */
2437 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2438 ICE_TXD_CTX_QW0_EIPLEN_S;
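/*
 * EIPLEN is expressed in 4-byte DWords: e.g. a 20-byte outer IPv4 header
 * without options yields outer_l3_len >> 2 == 5.
 */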
2440 /* L4TUNT: L4 Tunneling Type */
2441 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2442 case PKT_TX_TUNNEL_IPIP:
2443 /* for non UDP / GRE tunneling, set to 00b */
2445 case PKT_TX_TUNNEL_VXLAN:
2446 case PKT_TX_TUNNEL_GTP:
2447 case PKT_TX_TUNNEL_GENEVE:
2448 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2450 case PKT_TX_TUNNEL_GRE:
2451 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2454 PMD_TX_LOG(ERR, "Tunnel type not supported");
2458 /* L4TUNLEN: L4 Tunneling Length, in Words
2460 * We depend on app to set rte_mbuf.l2_len correctly.
2461 * For IP in GRE it should be set to the length of the GRE header.
2463 * For MAC in GRE or MAC in UDP it should be set to the length
2464 * of the GRE or UDP headers plus the inner MAC up to including
2465 * its last Ethertype.
2466 * If MPLS labels exist, they should be included as well.
2468 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2469 ICE_TXD_CTX_QW0_NATLEN_S;
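/*
 * NATLEN is expressed in 2-byte words, hence the shift by one of the
 * application-provided l2_len described in the comment above.
 */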
2472 * Calculate the tunneling UDP checksum.
2473 * Shall be set only if L4TUNT = 01b and EIPT is not zero
2475 if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2476 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2477 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2481 ice_txd_enable_checksum(uint64_t ol_flags,
2483 uint32_t *td_offset,
2484 union ice_tx_offload tx_offload)
2487 if (ol_flags & PKT_TX_TUNNEL_MASK)
2488 *td_offset |= (tx_offload.outer_l2_len >> 1)
2489 << ICE_TX_DESC_LEN_MACLEN_S;
2491 *td_offset |= (tx_offload.l2_len >> 1)
2492 << ICE_TX_DESC_LEN_MACLEN_S;
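/*
 * MACLEN is expressed in 2-byte words: a standard 14-byte Ethernet header
 * gives (14 >> 1) == 7.
 */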
2494 /* Enable L3 checksum offloads */
2495 if (ol_flags & PKT_TX_IP_CKSUM) {
2496 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2497 *td_offset |= (tx_offload.l3_len >> 2) <<
2498 ICE_TX_DESC_LEN_IPLEN_S;
2499 } else if (ol_flags & PKT_TX_IPV4) {
2500 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2501 *td_offset |= (tx_offload.l3_len >> 2) <<
2502 ICE_TX_DESC_LEN_IPLEN_S;
2503 } else if (ol_flags & PKT_TX_IPV6) {
2504 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2505 *td_offset |= (tx_offload.l3_len >> 2) <<
2506 ICE_TX_DESC_LEN_IPLEN_S;
2509 if (ol_flags & PKT_TX_TCP_SEG) {
2510 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2511 *td_offset |= (tx_offload.l4_len >> 2) <<
2512 ICE_TX_DESC_LEN_L4_LEN_S;
2516 /* Enable L4 checksum offloads */
2517 switch (ol_flags & PKT_TX_L4_MASK) {
2518 case PKT_TX_TCP_CKSUM:
2519 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2520 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2521 ICE_TX_DESC_LEN_L4_LEN_S;
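/*
 * L4_LEN is expressed in 4-byte DWords: the 20-byte TCP header (no options)
 * above encodes as 5, the 8-byte UDP header below as 2.
 */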
2523 case PKT_TX_SCTP_CKSUM:
2524 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2525 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2526 ICE_TX_DESC_LEN_L4_LEN_S;
2528 case PKT_TX_UDP_CKSUM:
2529 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2530 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2531 ICE_TX_DESC_LEN_L4_LEN_S;
2539 ice_xmit_cleanup(struct ice_tx_queue *txq)
2541 struct ice_tx_entry *sw_ring = txq->sw_ring;
2542 volatile struct ice_tx_desc *txd = txq->tx_ring;
2543 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2544 uint16_t nb_tx_desc = txq->nb_tx_desc;
2545 uint16_t desc_to_clean_to;
2546 uint16_t nb_tx_to_clean;
2548 /* Determine the last descriptor needing to be cleaned */
2549 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2550 if (desc_to_clean_to >= nb_tx_desc)
2551 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2553 /* Check to make sure the last descriptor to clean is done */
2554 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2555 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2556 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2557 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2558 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2560 txq->port_id, txq->queue_id,
2561 txd[desc_to_clean_to].cmd_type_offset_bsz);
2562 /* Failed to clean any descriptors */
2566 /* Figure out how many descriptors will be cleaned */
2567 if (last_desc_cleaned > desc_to_clean_to)
2568 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2571 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2574 /* The last descriptor to clean is done, so that means all the
2575 * descriptors from the last descriptor that was cleaned
2576 * up to the last descriptor with the RS bit set
2577 * are done. Only reset the threshold descriptor.
2579 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2581 /* Update the txq to reflect the last descriptor that was cleaned */
2582 txq->last_desc_cleaned = desc_to_clean_to;
2583 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2588 /* Construct the tx flags */
2589 static inline uint64_t
2590 ice_build_ctob(uint32_t td_cmd,
2595 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2596 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2597 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2598 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2599 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2602 /* Check if the context descriptor is needed for TX offloading */
2603 static inline uint16_t
2604 ice_calc_context_desc(uint64_t flags)
2606 static uint64_t mask = PKT_TX_TCP_SEG |
2608 PKT_TX_OUTER_IP_CKSUM |
2609 PKT_TX_TUNNEL_MASK |
2610 PKT_TX_IEEE1588_TMST;
2612 return (flags & mask) ? 1 : 0;
2615 /* set ice TSO context descriptor */
2616 static inline uint64_t
2617 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2619 uint64_t ctx_desc = 0;
2620 uint32_t cd_cmd, hdr_len, cd_tso_len;
2622 if (!tx_offload.l4_len) {
2623 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2627 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2628 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2629 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2631 cd_cmd = ICE_TX_CTX_DESC_TSO;
2632 cd_tso_len = mbuf->pkt_len - hdr_len;
2633 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2634 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2635 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2640 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2641 #define ICE_MAX_DATA_PER_TXD \
2642 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
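/*
 * ICE_MAX_DATA_PER_TXD evaluates to 16383 bytes (16K - 1). As an
 * illustrative example, a single 40000-byte mbuf segment would therefore
 * need DIV_ROUND_UP(40000, 16383) == 3 data descriptors.
 */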
2643 /* Calculate the number of TX descriptors needed for each pkt */
2644 static inline uint16_t
2645 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2647 struct rte_mbuf *txd = tx_pkt;
2650 while (txd != NULL) {
2651 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2659 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2661 struct ice_tx_queue *txq;
2662 volatile struct ice_tx_desc *tx_ring;
2663 volatile struct ice_tx_desc *txd;
2664 struct ice_tx_entry *sw_ring;
2665 struct ice_tx_entry *txe, *txn;
2666 struct rte_mbuf *tx_pkt;
2667 struct rte_mbuf *m_seg;
2668 uint32_t cd_tunneling_params;
2673 uint32_t td_cmd = 0;
2674 uint32_t td_offset = 0;
2675 uint32_t td_tag = 0;
2678 uint64_t buf_dma_addr;
2680 union ice_tx_offload tx_offload = {0};
2683 sw_ring = txq->sw_ring;
2684 tx_ring = txq->tx_ring;
2685 tx_id = txq->tx_tail;
2686 txe = &sw_ring[tx_id];
2688 /* Check if the descriptor ring needs to be cleaned. */
2689 if (txq->nb_tx_free < txq->tx_free_thresh)
2690 (void)ice_xmit_cleanup(txq);
2692 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2693 tx_pkt = *tx_pkts++;
2698 ol_flags = tx_pkt->ol_flags;
2699 tx_offload.l2_len = tx_pkt->l2_len;
2700 tx_offload.l3_len = tx_pkt->l3_len;
2701 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2702 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2703 tx_offload.l4_len = tx_pkt->l4_len;
2704 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2705 /* Calculate the number of context descriptors needed. */
2706 nb_ctx = ice_calc_context_desc(ol_flags);
2708 /* The number of descriptors that must be allocated for
2709 * a packet equals the number of segments of that packet,
2710 * plus one context descriptor if one is needed.
2711 * Recalculate the needed Tx descriptors when TSO is enabled,
2712 * in case the mbuf data size exceeds the maximum data size the hardware allows
2715 if (ol_flags & PKT_TX_TCP_SEG)
2716 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2719 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2720 tx_last = (uint16_t)(tx_id + nb_used - 1);
2723 if (tx_last >= txq->nb_tx_desc)
2724 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2726 if (nb_used > txq->nb_tx_free) {
2727 if (ice_xmit_cleanup(txq) != 0) {
2732 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2733 while (nb_used > txq->nb_tx_free) {
2734 if (ice_xmit_cleanup(txq) != 0) {
2743 /* Descriptor based VLAN insertion */
2744 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2745 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2746 td_tag = tx_pkt->vlan_tci;
2749 /* Fill in tunneling parameters if necessary */
2750 cd_tunneling_params = 0;
2751 if (ol_flags & PKT_TX_TUNNEL_MASK)
2752 ice_parse_tunneling_params(ol_flags, tx_offload,
2753 &cd_tunneling_params);
2755 /* Enable checksum offloading */
2756 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2757 ice_txd_enable_checksum(ol_flags, &td_cmd,
2758 &td_offset, tx_offload);
2761 /* Setup TX context descriptor if required */
2762 volatile struct ice_tx_ctx_desc *ctx_txd =
2763 (volatile struct ice_tx_ctx_desc *)
2765 uint16_t cd_l2tag2 = 0;
2766 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2768 txn = &sw_ring[txe->next_id];
2769 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2771 rte_pktmbuf_free_seg(txe->mbuf);
2775 if (ol_flags & PKT_TX_TCP_SEG)
2776 cd_type_cmd_tso_mss |=
2777 ice_set_tso_ctx(tx_pkt, tx_offload);
2778 else if (ol_flags & PKT_TX_IEEE1588_TMST)
2779 cd_type_cmd_tso_mss |=
2780 ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
2781 ICE_TXD_CTX_QW1_CMD_S);
2783 ctx_txd->tunneling_params =
2784 rte_cpu_to_le_32(cd_tunneling_params);
2786 /* TX context descriptor based double VLAN insert */
2787 if (ol_flags & PKT_TX_QINQ) {
2788 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2789 cd_type_cmd_tso_mss |=
2790 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2791 ICE_TXD_CTX_QW1_CMD_S);
2793 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2795 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2797 txe->last_id = tx_last;
2798 tx_id = txe->next_id;
2804 txd = &tx_ring[tx_id];
2805 txn = &sw_ring[txe->next_id];
2808 rte_pktmbuf_free_seg(txe->mbuf);
2811 /* Setup TX Descriptor */
2812 slen = m_seg->data_len;
2813 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2815 while ((ol_flags & PKT_TX_TCP_SEG) &&
2816 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2817 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2818 txd->cmd_type_offset_bsz =
2819 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2820 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2821 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2822 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2823 ICE_TXD_QW1_TX_BUF_SZ_S) |
2824 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2826 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2827 slen -= ICE_MAX_DATA_PER_TXD;
2829 txe->last_id = tx_last;
2830 tx_id = txe->next_id;
2832 txd = &tx_ring[tx_id];
2833 txn = &sw_ring[txe->next_id];
2836 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2837 txd->cmd_type_offset_bsz =
2838 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2839 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2840 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2841 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2842 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2844 txe->last_id = tx_last;
2845 tx_id = txe->next_id;
2847 m_seg = m_seg->next;
2850 /* fill the last descriptor with End of Packet (EOP) bit */
2851 td_cmd |= ICE_TX_DESC_CMD_EOP;
2852 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2853 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2855 /* set RS bit on the last descriptor of one packet */
2856 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2858 "Setting RS bit on TXD id="
2859 "%4u (port=%d queue=%d)",
2860 tx_last, txq->port_id, txq->queue_id);
2862 td_cmd |= ICE_TX_DESC_CMD_RS;
2864 /* Update txq RS bit counters */
2865 txq->nb_tx_used = 0;
2867 txd->cmd_type_offset_bsz |=
2868 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2872 /* update Tail register */
2873 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2874 txq->tx_tail = tx_id;
2879 static __rte_always_inline int
2880 ice_tx_free_bufs(struct ice_tx_queue *txq)
2882 struct ice_tx_entry *txep;
2885 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2886 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2887 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2890 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2892 for (i = 0; i < txq->tx_rs_thresh; i++)
2893 rte_prefetch0((txep + i)->mbuf);
2895 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2896 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2897 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2901 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2902 rte_pktmbuf_free_seg(txep->mbuf);
2907 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2908 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2909 if (txq->tx_next_dd >= txq->nb_tx_desc)
2910 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2912 return txq->tx_rs_thresh;
2916 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2919 struct ice_tx_entry *swr_ring = txq->sw_ring;
2920 uint16_t i, tx_last, tx_id;
2921 uint16_t nb_tx_free_last;
2922 uint16_t nb_tx_to_clean;
2925 /* Start freeing mbufs from the entry after tx_tail */
2926 tx_last = txq->tx_tail;
2927 tx_id = swr_ring[tx_last].next_id;
2929 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2932 nb_tx_to_clean = txq->nb_tx_free;
2933 nb_tx_free_last = txq->nb_tx_free;
2935 free_cnt = txq->nb_tx_desc;
2937 /* Loop through swr_ring to count the number of
2938 * freeable mbufs and packets.
2940 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2941 for (i = 0; i < nb_tx_to_clean &&
2942 pkt_cnt < free_cnt &&
2943 tx_id != tx_last; i++) {
2944 if (swr_ring[tx_id].mbuf != NULL) {
2945 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2946 swr_ring[tx_id].mbuf = NULL;
2949 * last segment in the packet,
2950 * increment packet count
2952 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2955 tx_id = swr_ring[tx_id].next_id;
2958 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2959 txq->nb_tx_free || tx_id == tx_last)
2962 if (pkt_cnt < free_cnt) {
2963 if (ice_xmit_cleanup(txq))
2966 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2967 nb_tx_free_last = txq->nb_tx_free;
2971 return (int)pkt_cnt;
2976 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2977 uint32_t free_cnt __rte_unused)
2984 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2989 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2990 free_cnt = txq->nb_tx_desc;
2992 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2994 for (i = 0; i < cnt; i += n) {
2995 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2998 n = ice_tx_free_bufs(txq);
3008 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
3010 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
3011 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
3012 struct ice_adapter *ad =
3013 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3016 if (ad->tx_vec_allowed)
3017 return ice_tx_done_cleanup_vec(q, free_cnt);
3019 if (ad->tx_simple_allowed)
3020 return ice_tx_done_cleanup_simple(q, free_cnt);
3022 return ice_tx_done_cleanup_full(q, free_cnt);
3025 /* Populate 4 descriptors with data from 4 mbufs */
3027 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3032 for (i = 0; i < 4; i++, txdp++, pkts++) {
3033 dma_addr = rte_mbuf_data_iova(*pkts);
3034 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3035 txdp->cmd_type_offset_bsz =
3036 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3037 (*pkts)->data_len, 0);
3041 /* Populate 1 descriptor with data from 1 mbuf */
3043 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3047 dma_addr = rte_mbuf_data_iova(*pkts);
3048 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3049 txdp->cmd_type_offset_bsz =
3050 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3051 (*pkts)->data_len, 0);
3055 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
3058 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
3059 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3060 const int N_PER_LOOP = 4;
3061 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3062 int mainpart, leftover;
3066 * Process most of the packets in chunks of N pkts. Any
3067 * leftover packets will get processed one at a time.
3069 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3070 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
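/*
 * Example: for nb_pkts == 23, mainpart == 20 (handled four at a time by
 * tx4()) and leftover == 3 (handled one at a time by tx1()).
 */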
3071 for (i = 0; i < mainpart; i += N_PER_LOOP) {
3072 /* Copy N mbuf pointers to the S/W ring */
3073 for (j = 0; j < N_PER_LOOP; ++j)
3074 (txep + i + j)->mbuf = *(pkts + i + j);
3075 tx4(txdp + i, pkts + i);
3078 if (unlikely(leftover > 0)) {
3079 for (i = 0; i < leftover; ++i) {
3080 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3081 tx1(txdp + mainpart + i, pkts + mainpart + i);
3086 static inline uint16_t
3087 tx_xmit_pkts(struct ice_tx_queue *txq,
3088 struct rte_mbuf **tx_pkts,
3091 volatile struct ice_tx_desc *txr = txq->tx_ring;
3095 * Begin scanning the H/W ring for done descriptors when the number
3096 * of available descriptors drops below tx_free_thresh. For each done
3097 * descriptor, free the associated buffer.
3099 if (txq->nb_tx_free < txq->tx_free_thresh)
3100 ice_tx_free_bufs(txq);
3102 /* Use available descriptor only */
3103 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3104 if (unlikely(!nb_pkts))
3107 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
3108 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3109 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3110 ice_tx_fill_hw_ring(txq, tx_pkts, n);
3111 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3112 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3114 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3118 /* Fill hardware descriptor ring with mbuf data */
3119 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3120 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3122 /* Determine if the RS bit needs to be set */
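/*
 * In this simple path the RS bit is requested once every tx_rs_thresh
 * descriptors (at index tx_next_rs), so completions, and therefore mbuf
 * frees in ice_tx_free_bufs(), happen in tx_rs_thresh batches.
 */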
3123 if (txq->tx_tail > txq->tx_next_rs) {
3124 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3125 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3128 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3129 if (txq->tx_next_rs >= txq->nb_tx_desc)
3130 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3133 if (txq->tx_tail >= txq->nb_tx_desc)
3136 /* Update the tx tail register */
3137 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3143 ice_xmit_pkts_simple(void *tx_queue,
3144 struct rte_mbuf **tx_pkts,
3149 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3150 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3154 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3157 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3158 &tx_pkts[nb_tx], num);
3159 nb_tx = (uint16_t)(nb_tx + ret);
3160 nb_pkts = (uint16_t)(nb_pkts - ret);
3169 ice_set_rx_function(struct rte_eth_dev *dev)
3171 PMD_INIT_FUNC_TRACE();
3172 struct ice_adapter *ad =
3173 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3175 struct ice_rx_queue *rxq;
3177 int rx_check_ret = -1;
3179 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3180 ad->rx_use_avx512 = false;
3181 ad->rx_use_avx2 = false;
3182 rx_check_ret = ice_rx_vec_dev_check(dev);
3185 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3186 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3187 ad->rx_vec_allowed = true;
3188 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3189 rxq = dev->data->rx_queues[i];
3190 if (rxq && ice_rxq_vec_setup(rxq)) {
3191 ad->rx_vec_allowed = false;
3196 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3197 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3198 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3199 #ifdef CC_AVX512_SUPPORT
3200 ad->rx_use_avx512 = true;
3203 "AVX512 is not supported in build env");
3205 if (!ad->rx_use_avx512 &&
3206 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3207 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3208 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3209 ad->rx_use_avx2 = true;
3212 ad->rx_vec_allowed = false;
3216 if (ad->rx_vec_allowed) {
3217 if (dev->data->scattered_rx) {
3218 if (ad->rx_use_avx512) {
3219 #ifdef CC_AVX512_SUPPORT
3220 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3222 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3223 dev->data->port_id);
3225 ice_recv_scattered_pkts_vec_avx512_offload;
3228 "Using AVX512 Vector Scattered Rx (port %d).",
3229 dev->data->port_id);
3231 ice_recv_scattered_pkts_vec_avx512;
3234 } else if (ad->rx_use_avx2) {
3235 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3237 "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3238 dev->data->port_id);
3240 ice_recv_scattered_pkts_vec_avx2_offload;
3243 "Using AVX2 Vector Scattered Rx (port %d).",
3244 dev->data->port_id);
3246 ice_recv_scattered_pkts_vec_avx2;
3250 "Using Vector Scattered Rx (port %d).",
3251 dev->data->port_id);
3252 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3255 if (ad->rx_use_avx512) {
3256 #ifdef CC_AVX512_SUPPORT
3257 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3259 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3260 dev->data->port_id);
3262 ice_recv_pkts_vec_avx512_offload;
3265 "Using AVX512 Vector Rx (port %d).",
3266 dev->data->port_id);
3268 ice_recv_pkts_vec_avx512;
3271 } else if (ad->rx_use_avx2) {
3272 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3274 "Using AVX2 OFFLOAD Vector Rx (port %d).",
3275 dev->data->port_id);
3277 ice_recv_pkts_vec_avx2_offload;
3280 "Using AVX2 Vector Rx (port %d).",
3281 dev->data->port_id);
3283 ice_recv_pkts_vec_avx2;
3287 "Using Vector Rx (port %d).",
3288 dev->data->port_id);
3289 dev->rx_pkt_burst = ice_recv_pkts_vec;
3297 if (dev->data->scattered_rx) {
3298 /* Set the non-LRO scattered function */
3300 "Using a Scattered function on port %d.",
3301 dev->data->port_id);
3302 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3303 } else if (ad->rx_bulk_alloc_allowed) {
3305 "Rx Burst Bulk Alloc Preconditions are "
3306 "satisfied. Rx Burst Bulk Alloc function "
3307 "will be used on port %d.",
3308 dev->data->port_id);
3309 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3312 "Rx Burst Bulk Alloc Preconditions are not "
3313 "satisfied, Normal Rx will be used on port %d.",
3314 dev->data->port_id);
3315 dev->rx_pkt_burst = ice_recv_pkts;
3319 static const struct {
3320 eth_rx_burst_t pkt_burst;
3322 } ice_rx_burst_infos[] = {
3323 { ice_recv_scattered_pkts, "Scalar Scattered" },
3324 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3325 { ice_recv_pkts, "Scalar" },
3327 #ifdef CC_AVX512_SUPPORT
3328 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3329 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3330 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3331 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3333 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3334 { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3335 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3336 { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
3337 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3338 { ice_recv_pkts_vec, "Vector SSE" },
3343 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3344 struct rte_eth_burst_mode *mode)
3346 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3350 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3351 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3352 snprintf(mode->info, sizeof(mode->info), "%s",
3353 ice_rx_burst_infos[i].info);
3363 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3365 struct ice_adapter *ad =
3366 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3368 /* Use a simple Tx queue if possible (only fast free is allowed) */
3369 ad->tx_simple_allowed =
3371 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3372 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3374 if (ad->tx_simple_allowed)
3375 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3379 "Simple Tx can NOT be enabled on Tx queue %u.",
3383 /*********************************************************************
 *
 *  TX prep functions
 *
3387 **********************************************************************/
3388 /* The default values of TSO MSS */
3389 #define ICE_MIN_TSO_MSS 64
3390 #define ICE_MAX_TSO_MSS 9728
3391 #define ICE_MAX_TSO_FRAME_SIZE 262144
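/*
 * ice_prep_pkts() below rejects TSO requests whose MSS falls outside
 * [64, 9728] bytes or whose total frame length exceeds 262144 bytes
 * (256 KiB).
 */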
3393 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3400 for (i = 0; i < nb_pkts; i++) {
3402 ol_flags = m->ol_flags;
3404 if (ol_flags & PKT_TX_TCP_SEG &&
3405 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3406 m->tso_segsz > ICE_MAX_TSO_MSS ||
3407 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3409 * MSS values outside this range are considered malicious
3415 #ifdef RTE_ETHDEV_DEBUG_TX
3416 ret = rte_validate_tx_offload(m);
3422 ret = rte_net_intel_cksum_prepare(m);
3432 ice_set_tx_function(struct rte_eth_dev *dev)
3434 struct ice_adapter *ad =
3435 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3437 struct ice_tx_queue *txq;
3439 int tx_check_ret = -1;
3441 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3442 ad->tx_use_avx2 = false;
3443 ad->tx_use_avx512 = false;
3444 tx_check_ret = ice_tx_vec_dev_check(dev);
3445 if (tx_check_ret >= 0 &&
3446 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3447 ad->tx_vec_allowed = true;
3449 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3450 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3451 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3452 #ifdef CC_AVX512_SUPPORT
3453 ad->tx_use_avx512 = true;
3456 "AVX512 is not supported in build env");
3458 if (!ad->tx_use_avx512 &&
3459 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3460 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3461 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3462 ad->tx_use_avx2 = true;
3464 if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3465 tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3466 ad->tx_vec_allowed = false;
3468 if (ad->tx_vec_allowed) {
3469 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3470 txq = dev->data->tx_queues[i];
3471 if (txq && ice_txq_vec_setup(txq)) {
3472 ad->tx_vec_allowed = false;
3478 ad->tx_vec_allowed = false;
3482 if (ad->tx_vec_allowed) {
3483 dev->tx_pkt_prepare = NULL;
3484 if (ad->tx_use_avx512) {
3485 #ifdef CC_AVX512_SUPPORT
3486 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3488 "Using AVX512 OFFLOAD Vector Tx (port %d).",
3489 dev->data->port_id);
3491 ice_xmit_pkts_vec_avx512_offload;
3492 dev->tx_pkt_prepare = ice_prep_pkts;
3495 "Using AVX512 Vector Tx (port %d).",
3496 dev->data->port_id);
3497 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3501 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3503 "Using AVX2 OFFLOAD Vector Tx (port %d).",
3504 dev->data->port_id);
3506 ice_xmit_pkts_vec_avx2_offload;
3507 dev->tx_pkt_prepare = ice_prep_pkts;
3509 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3510 ad->tx_use_avx2 ? "avx2 " : "",
3511 dev->data->port_id);
3512 dev->tx_pkt_burst = ad->tx_use_avx2 ?
3513 ice_xmit_pkts_vec_avx2 :
3522 if (ad->tx_simple_allowed) {
3523 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3524 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3525 dev->tx_pkt_prepare = NULL;
3527 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3528 dev->tx_pkt_burst = ice_xmit_pkts;
3529 dev->tx_pkt_prepare = ice_prep_pkts;
3533 static const struct {
3534 eth_tx_burst_t pkt_burst;
3536 } ice_tx_burst_infos[] = {
3537 { ice_xmit_pkts_simple, "Scalar Simple" },
3538 { ice_xmit_pkts, "Scalar" },
3540 #ifdef CC_AVX512_SUPPORT
3541 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3542 { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3544 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3545 { ice_xmit_pkts_vec, "Vector SSE" },
3550 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3551 struct rte_eth_burst_mode *mode)
3553 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3557 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3558 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3559 snprintf(mode->info, sizeof(mode->info), "%s",
3560 ice_tx_burst_infos[i].info);
3569 /* The hardware datasheet describes what each value means in detail.
3571 * @note: update ice_dev_supported_ptypes_get() if anything changes here.
3573 static inline uint32_t
3574 ice_get_default_pkt_type(uint16_t ptype)
3576 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3577 __rte_cache_aligned = {
3580 [1] = RTE_PTYPE_L2_ETHER,
3581 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3582 /* [3] - [5] reserved */
3583 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3584 /* [7] - [10] reserved */
3585 [11] = RTE_PTYPE_L2_ETHER_ARP,
3586 /* [12] - [21] reserved */
3588 /* Non tunneled IPv4 */
3589 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3591 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3592 RTE_PTYPE_L4_NONFRAG,
3593 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3596 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3598 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3600 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3604 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3605 RTE_PTYPE_TUNNEL_IP |
3606 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3607 RTE_PTYPE_INNER_L4_FRAG,
3608 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3609 RTE_PTYPE_TUNNEL_IP |
3610 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3611 RTE_PTYPE_INNER_L4_NONFRAG,
3612 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3613 RTE_PTYPE_TUNNEL_IP |
3614 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3615 RTE_PTYPE_INNER_L4_UDP,
3617 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3618 RTE_PTYPE_TUNNEL_IP |
3619 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3620 RTE_PTYPE_INNER_L4_TCP,
3621 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3622 RTE_PTYPE_TUNNEL_IP |
3623 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3624 RTE_PTYPE_INNER_L4_SCTP,
3625 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3626 RTE_PTYPE_TUNNEL_IP |
3627 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3628 RTE_PTYPE_INNER_L4_ICMP,
3631 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3632 RTE_PTYPE_TUNNEL_IP |
3633 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3634 RTE_PTYPE_INNER_L4_FRAG,
3635 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3636 RTE_PTYPE_TUNNEL_IP |
3637 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3638 RTE_PTYPE_INNER_L4_NONFRAG,
3639 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3640 RTE_PTYPE_TUNNEL_IP |
3641 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3642 RTE_PTYPE_INNER_L4_UDP,
3644 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3645 RTE_PTYPE_TUNNEL_IP |
3646 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3647 RTE_PTYPE_INNER_L4_TCP,
3648 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3649 RTE_PTYPE_TUNNEL_IP |
3650 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3651 RTE_PTYPE_INNER_L4_SCTP,
3652 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3653 RTE_PTYPE_TUNNEL_IP |
3654 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3655 RTE_PTYPE_INNER_L4_ICMP,
3657 /* IPv4 --> GRE/Teredo/VXLAN */
3658 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3659 RTE_PTYPE_TUNNEL_GRENAT,
3661 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3662 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3663 RTE_PTYPE_TUNNEL_GRENAT |
3664 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3665 RTE_PTYPE_INNER_L4_FRAG,
3666 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3667 RTE_PTYPE_TUNNEL_GRENAT |
3668 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3669 RTE_PTYPE_INNER_L4_NONFRAG,
3670 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3671 RTE_PTYPE_TUNNEL_GRENAT |
3672 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3673 RTE_PTYPE_INNER_L4_UDP,
3675 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3676 RTE_PTYPE_TUNNEL_GRENAT |
3677 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3678 RTE_PTYPE_INNER_L4_TCP,
3679 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3680 RTE_PTYPE_TUNNEL_GRENAT |
3681 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3682 RTE_PTYPE_INNER_L4_SCTP,
3683 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3684 RTE_PTYPE_TUNNEL_GRENAT |
3685 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3686 RTE_PTYPE_INNER_L4_ICMP,
3688 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3689 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3690 RTE_PTYPE_TUNNEL_GRENAT |
3691 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3692 RTE_PTYPE_INNER_L4_FRAG,
3693 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3694 RTE_PTYPE_TUNNEL_GRENAT |
3695 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3696 RTE_PTYPE_INNER_L4_NONFRAG,
3697 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3698 RTE_PTYPE_TUNNEL_GRENAT |
3699 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3700 RTE_PTYPE_INNER_L4_UDP,
3702 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3703 RTE_PTYPE_TUNNEL_GRENAT |
3704 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3705 RTE_PTYPE_INNER_L4_TCP,
3706 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3707 RTE_PTYPE_TUNNEL_GRENAT |
3708 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3709 RTE_PTYPE_INNER_L4_SCTP,
3710 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3711 RTE_PTYPE_TUNNEL_GRENAT |
3712 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3713 RTE_PTYPE_INNER_L4_ICMP,
3715 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3716 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3717 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3719 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3720 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3721 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3722 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3723 RTE_PTYPE_INNER_L4_FRAG,
3724 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3725 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3726 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3727 RTE_PTYPE_INNER_L4_NONFRAG,
3728 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3729 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3730 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3731 RTE_PTYPE_INNER_L4_UDP,
3733 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3734 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3735 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3736 RTE_PTYPE_INNER_L4_TCP,
3737 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3738 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3739 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3740 RTE_PTYPE_INNER_L4_SCTP,
3741 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3742 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3743 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3744 RTE_PTYPE_INNER_L4_ICMP,
3746 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3747 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3748 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3749 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3750 RTE_PTYPE_INNER_L4_FRAG,
3751 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3752 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3753 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3754 RTE_PTYPE_INNER_L4_NONFRAG,
3755 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3756 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3757 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3758 RTE_PTYPE_INNER_L4_UDP,
3760 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3761 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3762 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3763 RTE_PTYPE_INNER_L4_TCP,
3764 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3765 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3766 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3767 RTE_PTYPE_INNER_L4_SCTP,
3768 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3769 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3770 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3771 RTE_PTYPE_INNER_L4_ICMP,
3772 /* [73] - [87] reserved */
3774 /* Non tunneled IPv6 */
3775 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3777 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3778 RTE_PTYPE_L4_NONFRAG,
3779 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3782 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3784 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3786 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3790 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3791 RTE_PTYPE_TUNNEL_IP |
3792 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3793 RTE_PTYPE_INNER_L4_FRAG,
3794 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3795 RTE_PTYPE_TUNNEL_IP |
3796 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3797 RTE_PTYPE_INNER_L4_NONFRAG,
3798 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3799 RTE_PTYPE_TUNNEL_IP |
3800 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3801 RTE_PTYPE_INNER_L4_UDP,
3803 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3804 RTE_PTYPE_TUNNEL_IP |
3805 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3806 RTE_PTYPE_INNER_L4_TCP,
3807 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3808 RTE_PTYPE_TUNNEL_IP |
3809 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3810 RTE_PTYPE_INNER_L4_SCTP,
3811 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3812 RTE_PTYPE_TUNNEL_IP |
3813 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3814 RTE_PTYPE_INNER_L4_ICMP,
3817 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3818 RTE_PTYPE_TUNNEL_IP |
3819 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3820 RTE_PTYPE_INNER_L4_FRAG,
3821 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3822 RTE_PTYPE_TUNNEL_IP |
3823 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3824 RTE_PTYPE_INNER_L4_NONFRAG,
3825 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3826 RTE_PTYPE_TUNNEL_IP |
3827 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3828 RTE_PTYPE_INNER_L4_UDP,
3829 /* [105] reserved */
3830 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3831 RTE_PTYPE_TUNNEL_IP |
3832 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3833 RTE_PTYPE_INNER_L4_TCP,
3834 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3835 RTE_PTYPE_TUNNEL_IP |
3836 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3837 RTE_PTYPE_INNER_L4_SCTP,
3838 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3839 RTE_PTYPE_TUNNEL_IP |
3840 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3841 RTE_PTYPE_INNER_L4_ICMP,
3843 /* IPv6 --> GRE/Teredo/VXLAN */
3844 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3845 RTE_PTYPE_TUNNEL_GRENAT,
3847 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3848 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3849 RTE_PTYPE_TUNNEL_GRENAT |
3850 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3851 RTE_PTYPE_INNER_L4_FRAG,
3852 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3853 RTE_PTYPE_TUNNEL_GRENAT |
3854 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3855 RTE_PTYPE_INNER_L4_NONFRAG,
3856 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3857 RTE_PTYPE_TUNNEL_GRENAT |
3858 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3859 RTE_PTYPE_INNER_L4_UDP,
3860 /* [113] reserved */
3861 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3862 RTE_PTYPE_TUNNEL_GRENAT |
3863 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3864 RTE_PTYPE_INNER_L4_TCP,
3865 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3866 RTE_PTYPE_TUNNEL_GRENAT |
3867 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3868 RTE_PTYPE_INNER_L4_SCTP,
3869 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3870 RTE_PTYPE_TUNNEL_GRENAT |
3871 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3872 RTE_PTYPE_INNER_L4_ICMP,
3874 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3875 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3876 RTE_PTYPE_TUNNEL_GRENAT |
3877 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3878 RTE_PTYPE_INNER_L4_FRAG,
3879 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3880 RTE_PTYPE_TUNNEL_GRENAT |
3881 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3882 RTE_PTYPE_INNER_L4_NONFRAG,
3883 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3884 RTE_PTYPE_TUNNEL_GRENAT |
3885 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3886 RTE_PTYPE_INNER_L4_UDP,
3887 /* [120] reserved */
3888 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3889 RTE_PTYPE_TUNNEL_GRENAT |
3890 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3891 RTE_PTYPE_INNER_L4_TCP,
3892 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3893 RTE_PTYPE_TUNNEL_GRENAT |
3894 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3895 RTE_PTYPE_INNER_L4_SCTP,
3896 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3897 RTE_PTYPE_TUNNEL_GRENAT |
3898 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3899 RTE_PTYPE_INNER_L4_ICMP,
3901 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3902 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3903 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3905 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3906 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3907 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3908 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3909 RTE_PTYPE_INNER_L4_FRAG,
3910 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3911 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3912 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3913 RTE_PTYPE_INNER_L4_NONFRAG,
3914 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3915 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3916 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3917 RTE_PTYPE_INNER_L4_UDP,
3918 /* [128] reserved */
3919 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3920 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3921 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3922 RTE_PTYPE_INNER_L4_TCP,
3923 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3924 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3925 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3926 RTE_PTYPE_INNER_L4_SCTP,
3927 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3928 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3929 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3930 RTE_PTYPE_INNER_L4_ICMP,
3932 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3933 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3934 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3935 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3936 RTE_PTYPE_INNER_L4_FRAG,
3937 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3938 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3939 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3940 RTE_PTYPE_INNER_L4_NONFRAG,
3941 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3942 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3943 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3944 RTE_PTYPE_INNER_L4_UDP,
3945 /* [135] reserved */
3946 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3947 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3948 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3949 RTE_PTYPE_INNER_L4_TCP,
3950 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3951 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3952 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3953 RTE_PTYPE_INNER_L4_SCTP,
3954 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3955 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3956 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3957 RTE_PTYPE_INNER_L4_ICMP,
3958 /* [139] - [299] reserved */
3961 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3962 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3964 /* PPPoE --> IPv4 */
3965 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3966 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3968 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3969 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3970 RTE_PTYPE_L4_NONFRAG,
3971 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3972 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3974 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3975 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3977 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3978 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3980 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3981 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3984 /* PPPoE --> IPv6 */
3985 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3986 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3988 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3989 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3990 RTE_PTYPE_L4_NONFRAG,
3991 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3992 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3994 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3995 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3997 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3998 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4000 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
4001 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4003 /* [314] - [324] reserved */
4005 /* IPv4/IPv6 --> GTPC/GTPU */
4006 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4007 RTE_PTYPE_TUNNEL_GTPC,
4008 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4009 RTE_PTYPE_TUNNEL_GTPC,
4010 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4011 RTE_PTYPE_TUNNEL_GTPC,
4012 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4013 RTE_PTYPE_TUNNEL_GTPC,
4014 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4015 RTE_PTYPE_TUNNEL_GTPU,
4016 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4017 RTE_PTYPE_TUNNEL_GTPU,
4019 /* IPv4 --> GTPU --> IPv4 */
4020 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4021 RTE_PTYPE_TUNNEL_GTPU |
4022 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4023 RTE_PTYPE_INNER_L4_FRAG,
4024 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4025 RTE_PTYPE_TUNNEL_GTPU |
4026 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4027 RTE_PTYPE_INNER_L4_NONFRAG,
4028 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4029 RTE_PTYPE_TUNNEL_GTPU |
4030 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4031 RTE_PTYPE_INNER_L4_UDP,
4032 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4033 RTE_PTYPE_TUNNEL_GTPU |
4034 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4035 RTE_PTYPE_INNER_L4_TCP,
4036 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4037 RTE_PTYPE_TUNNEL_GTPU |
4038 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4039 RTE_PTYPE_INNER_L4_ICMP,
4041 /* IPv6 --> GTPU --> IPv4 */
4042 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4043 RTE_PTYPE_TUNNEL_GTPU |
4044 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4045 RTE_PTYPE_INNER_L4_FRAG,
4046 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4047 RTE_PTYPE_TUNNEL_GTPU |
4048 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4049 RTE_PTYPE_INNER_L4_NONFRAG,
4050 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4051 RTE_PTYPE_TUNNEL_GTPU |
4052 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4053 RTE_PTYPE_INNER_L4_UDP,
4054 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4055 RTE_PTYPE_TUNNEL_GTPU |
4056 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4057 RTE_PTYPE_INNER_L4_TCP,
4058 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4059 RTE_PTYPE_TUNNEL_GTPU |
4060 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4061 RTE_PTYPE_INNER_L4_ICMP,
4063 /* IPv4 --> GTPU --> IPv6 */
4064 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4065 RTE_PTYPE_TUNNEL_GTPU |
4066 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4067 RTE_PTYPE_INNER_L4_FRAG,
4068 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4069 RTE_PTYPE_TUNNEL_GTPU |
4070 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4071 RTE_PTYPE_INNER_L4_NONFRAG,
4072 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4073 RTE_PTYPE_TUNNEL_GTPU |
4074 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4075 RTE_PTYPE_INNER_L4_UDP,
4076 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4077 RTE_PTYPE_TUNNEL_GTPU |
4078 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4079 RTE_PTYPE_INNER_L4_TCP,
4080 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4081 RTE_PTYPE_TUNNEL_GTPU |
4082 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4083 RTE_PTYPE_INNER_L4_ICMP,
4085 /* IPv6 --> GTPU --> IPv6 */
4086 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4087 RTE_PTYPE_TUNNEL_GTPU |
4088 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4089 RTE_PTYPE_INNER_L4_FRAG,
4090 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4091 RTE_PTYPE_TUNNEL_GTPU |
4092 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4093 RTE_PTYPE_INNER_L4_NONFRAG,
4094 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4095 RTE_PTYPE_TUNNEL_GTPU |
4096 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4097 RTE_PTYPE_INNER_L4_UDP,
4098 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4099 RTE_PTYPE_TUNNEL_GTPU |
4100 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4101 RTE_PTYPE_INNER_L4_TCP,
4102 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4103 RTE_PTYPE_TUNNEL_GTPU |
4104 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4105 RTE_PTYPE_INNER_L4_ICMP,
4107 /* IPv4 --> UDP ECPRI */
4108 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4110 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4112 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4114 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4116 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4118 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4120 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4122 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4124 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4126 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4129 /* IPV6 --> UDP ECPRI */
4130 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4132 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4134 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4136 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4138 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4140 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4142 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4144 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4146 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4148 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4150 /* All others reserved */
4153 return type_table[ptype];
4157 ice_set_default_ptype_table(struct rte_eth_dev *dev)
4159 struct ice_adapter *ad =
4160 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4163 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4164 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4167 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
4168 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
4169 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4170 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4171 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4173 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
4174 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
4175 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4176 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
4177 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
4178 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4181 * Check the programming status descriptor in the Rx queue.
4182 * This is done after a Flow Director filter has been programmed on the
4186 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4188 volatile union ice_32byte_rx_desc *rxdp;
4195 rxdp = (volatile union ice_32byte_rx_desc *)
4196 (&rxq->rx_ring[rxq->rx_tail]);
4197 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4198 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4199 >> ICE_RXD_QW1_STATUS_S;
4201 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4203 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4204 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4205 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4206 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4208 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4209 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4210 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4211 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4215 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4216 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4218 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4222 rxdp->wb.qword1.status_error_len = 0;
4224 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4226 if (rxq->rx_tail == 0)
4227 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4229 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4235 #define ICE_FDIR_MAX_WAIT_US 10000
4238 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4240 struct ice_tx_queue *txq = pf->fdir.txq;
4241 struct ice_rx_queue *rxq = pf->fdir.rxq;
4242 volatile struct ice_fltr_desc *fdirdp;
4243 volatile struct ice_tx_desc *txdp;
4247 fdirdp = (volatile struct ice_fltr_desc *)
4248 (&txq->tx_ring[txq->tx_tail]);
4249 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4250 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4252 txdp = &txq->tx_ring[txq->tx_tail + 1];
4253 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4254 td_cmd = ICE_TX_DESC_CMD_EOP |
4255 ICE_TX_DESC_CMD_RS |
4256 ICE_TX_DESC_CMD_DUMMY;
4258 txdp->cmd_type_offset_bsz =
4259 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
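/*
 * FDIR programming consumes two descriptors on the dedicated Tx queue: the
 * filter descriptor written above and this dummy data descriptor
 * (ICE_TX_DESC_CMD_DUMMY) that points at the programming packet buffer and
 * requests completion via RS.
 */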
4262 if (txq->tx_tail >= txq->nb_tx_desc)
4264 /* Update the tx tail register */
4265 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4266 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4267 if ((txdp->cmd_type_offset_bsz &
4268 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4269 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4273 if (i >= ICE_FDIR_MAX_WAIT_US) {
4275 "Failed to program FDIR filter: timed out waiting for DD on the tx queue.");
4279 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4282 ret = ice_check_fdir_programming_status(rxq);
4290 "Failed to program FDIR filter: programming status reported.");