1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <ethdev_driver.h>
9 #include "rte_pmd_ice.h"
11 #include "ice_rxtx_vec_common.h"
13 #define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
14 RTE_MBUF_F_TX_L4_MASK | \
15 RTE_MBUF_F_TX_TCP_SEG | \
16 RTE_MBUF_F_TX_OUTER_IP_CKSUM)
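/* Any of these ol_flags on a Tx mbuf means checksum or TSO offload
 * work was requested for that packet.
 */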
18 /* Offset of mbuf dynamic field for protocol extraction data */
19 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
21 /* Mask of mbuf dynamic flags for protocol extraction type */
22 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
30 ice_monitor_callback(const uint64_t value,
31 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
33 const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
35 * we expect the DD bit to be set to 1 if this descriptor was already
38 return (value & m) == m ? -1 : 0;
42 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
44 volatile union ice_rx_flex_desc *rxdp;
45 struct ice_rx_queue *rxq = rx_queue;
49 rxdp = &rxq->rx_ring[desc];
50 /* watch for changes in status bit */
51 pmc->addr = &rxdp->wb.status_error0;
53 /* comparison callback */
54 pmc->fn = ice_monitor_callback;
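/* the callback aborts the power-saving wait as soon as the DD bit
 * appears in status_error0, i.e. when a packet is already pending
 */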
56 /* register is 16-bit */
57 pmc->size = sizeof(uint16_t);
64 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
66 static uint8_t rxdid_map[] = {
67 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
68 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
69 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
70 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
71 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
72 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
73 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
76 return xtr_type < RTE_DIM(rxdid_map) ?
77 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
81 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
83 volatile union ice_rx_flex_desc *rxdp)
85 volatile struct ice_32b_rx_flex_desc_comms *desc =
86 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
87 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
89 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
90 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
91 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
94 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
95 if (desc->flow_id != 0xFFFFFFFF) {
96 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
97 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
103 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
105 volatile union ice_rx_flex_desc *rxdp)
107 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
108 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
109 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
113 if (desc->flow_id != 0xFFFFFFFF) {
114 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
115 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
118 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
119 stat_err = rte_le_to_cpu_16(desc->status_error0);
120 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
121 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
122 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
128 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
130 volatile union ice_rx_flex_desc *rxdp)
132 volatile struct ice_32b_rx_flex_desc_comms *desc =
133 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
136 stat_err = rte_le_to_cpu_16(desc->status_error0);
137 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
138 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
139 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
142 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
143 if (desc->flow_id != 0xFFFFFFFF) {
144 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
145 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
148 if (rxq->xtr_ol_flag) {
149 uint32_t metadata = 0;
151 stat_err = rte_le_to_cpu_16(desc->status_error1);
153 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
154 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
156 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
158 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
161 mb->ol_flags |= rxq->xtr_ol_flag;
163 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
170 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
172 volatile union ice_rx_flex_desc *rxdp)
174 volatile struct ice_32b_rx_flex_desc_comms *desc =
175 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
178 stat_err = rte_le_to_cpu_16(desc->status_error0);
179 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
180 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
181 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
184 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
185 if (desc->flow_id != 0xFFFFFFFF) {
186 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
187 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
190 if (rxq->xtr_ol_flag) {
191 uint32_t metadata = 0;
193 if (desc->flex_ts.flex.aux0 != 0xFFFF)
194 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
195 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
196 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
199 mb->ol_flags |= rxq->xtr_ol_flag;
201 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
207 static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
208 [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
209 [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
210 [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
211 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
212 [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
213 [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
214 [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
215 [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
219 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
224 case ICE_RXDID_COMMS_AUX_VLAN:
225 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
228 case ICE_RXDID_COMMS_AUX_IPV4:
229 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
232 case ICE_RXDID_COMMS_AUX_IPV6:
233 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
236 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
237 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
240 case ICE_RXDID_COMMS_AUX_TCP:
241 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
244 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
245 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
248 case ICE_RXDID_COMMS_GENERIC:
250 case ICE_RXDID_COMMS_OVS:
254 /* update this according to the RXDID for PROTO_XTR_NONE */
255 rxq->rxdid = ICE_RXDID_COMMS_OVS;
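/* Without the registered metadata dynfield there is nowhere to store
 * the extracted protocol data, so drop the extraction flag.
 */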
259 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
260 rxq->xtr_ol_flag = 0;
263 static enum ice_status
264 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
266 struct ice_vsi *vsi = rxq->vsi;
267 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
268 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
269 struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
270 struct ice_rlan_ctx rx_ctx;
273 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
275 struct ice_adapter *ad = rxq->vsi->adapter;
276 uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
278 /* Set buffer size since header split is disabled. */
279 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
280 RTE_PKTMBUF_HEADROOM);
282 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
284 RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
287 if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
288 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
289 PMD_DRV_LOG(ERR, "maximum packet length must "
290 "be larger than %u and smaller than %u",
291 (uint32_t)RTE_ETHER_MIN_LEN,
292 (uint32_t)ICE_FRAME_SIZE_MAX);
296 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
297 /* Register mbuf field and flag for Rx timestamp */
298 err = rte_mbuf_dyn_rx_timestamp_register(
299 &ice_timestamp_dynfield_offset,
300 &ice_timestamp_dynflag);
303 "Cannot register mbuf field/flag for timestamp");
308 memset(&rx_ctx, 0, sizeof(rx_ctx));
310 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
311 rx_ctx.qlen = rxq->nb_rx_desc;
312 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
313 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
314 rx_ctx.dtype = 0; /* No Header Split mode */
315 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
316 rx_ctx.dsize = 1; /* 32B descriptors */
318 rx_ctx.rxmax = rxq->max_pkt_len;
319 /* TPH: Transaction Layer Packet (TLP) processing hints */
320 rx_ctx.tphrdesc_ena = 1;
321 rx_ctx.tphwdesc_ena = 1;
322 rx_ctx.tphdata_ena = 1;
323 rx_ctx.tphhead_ena = 1;
324 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
325 * When the number of free descriptors goes below the lrxqthresh,
326 * an immediate interrupt is triggered.
328 rx_ctx.lrxqthresh = 2;
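/* i.e. the interrupt fires once fewer than 2 * 64 = 128 free
 * descriptors remain
 */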
329 /* default: use 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
332 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
334 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
336 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
337 rxq->port_id, rxq->queue_id, rxdid);
339 if (!(pf->supported_rxdid & BIT(rxdid))) {
340 PMD_DRV_LOG(ERR, "currently the package doesn't support RXDID (%u)",
345 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
347 /* Enable Flexible Descriptors in the queue context which
348 * allows this driver to select a specific receive descriptor format
350 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
351 QRXFLXP_CNTXT_RXDID_IDX_M;
353 /* increasing context priority to pick up profile ID;
354 * default is 0x01; setting to 0x03 to ensure profile
355 * is programmed if the previous context is of the same priority
357 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
358 QRXFLXP_CNTXT_RXDID_PRIO_M;
360 if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
361 regval |= QRXFLXP_CNTXT_TS_M;
363 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
365 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
367 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
371 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
373 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
378 /* Check if scattered RX needs to be used. */
379 if (frame_size > buf_size)
380 dev_data->scattered_rx = 1;
382 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
384 /* Init the Rx tail register */
385 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
390 /* Allocate mbufs for all descriptors in rx queue */
392 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
394 struct ice_rx_entry *rxe = rxq->sw_ring;
398 for (i = 0; i < rxq->nb_rx_desc; i++) {
399 volatile union ice_rx_flex_desc *rxd;
400 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
402 if (unlikely(!mbuf)) {
403 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
407 rte_mbuf_refcnt_set(mbuf, 1);
409 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
411 mbuf->port = rxq->port_id;
414 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
416 rxd = &rxq->rx_ring[i];
417 rxd->read.pkt_addr = dma_addr;
418 rxd->read.hdr_addr = 0;
419 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
429 /* Free all mbufs for descriptors in rx queue */
431 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
435 if (!rxq || !rxq->sw_ring) {
436 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
440 for (i = 0; i < rxq->nb_rx_desc; i++) {
441 if (rxq->sw_ring[i].mbuf) {
442 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
443 rxq->sw_ring[i].mbuf = NULL;
446 if (rxq->rx_nb_avail == 0)
448 for (i = 0; i < rxq->rx_nb_avail; i++)
449 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
451 rxq->rx_nb_avail = 0;
454 /* turn on or off rx queue
455 * @q_idx: queue index in pf scope
456 * @on: turn on or off the queue
459 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
464 /* QRX_CTRL = QRX_ENA */
465 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
468 if (reg & QRX_CTRL_QENA_STAT_M)
469 return 0; /* Already on, skip */
470 reg |= QRX_CTRL_QENA_REQ_M;
472 if (!(reg & QRX_CTRL_QENA_STAT_M))
473 return 0; /* Already off, skip */
474 reg &= ~QRX_CTRL_QENA_REQ_M;
477 /* Write the register */
478 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
479 /* Check the result. It is said that QENA_STAT
480 * follows the QENA_REQ by not more than 10 us.
481 * TODO: need to change the wait counter later
483 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
484 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
485 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
487 if ((reg & QRX_CTRL_QENA_REQ_M) &&
488 (reg & QRX_CTRL_QENA_STAT_M))
491 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
492 !(reg & QRX_CTRL_QENA_STAT_M))
497 /* Check if it is timeout */
498 if (j >= ICE_CHK_Q_ENA_COUNT) {
499 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
500 (on ? "enable" : "disable"), q_idx);
508 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
512 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
513 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
514 "rxq->rx_free_thresh=%d, "
515 "ICE_RX_MAX_BURST=%d",
516 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
518 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
519 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
520 "rxq->rx_free_thresh=%d, "
521 "rxq->nb_rx_desc=%d",
522 rxq->rx_free_thresh, rxq->nb_rx_desc);
524 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
525 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
526 "rxq->nb_rx_desc=%d, "
527 "rxq->rx_free_thresh=%d",
528 rxq->nb_rx_desc, rxq->rx_free_thresh);
535 /* reset fields in ice_rx_queue back to default */
537 ice_reset_rx_queue(struct ice_rx_queue *rxq)
543 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
547 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
549 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
550 ((volatile char *)rxq->rx_ring)[i] = 0;
552 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
553 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
554 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
556 rxq->rx_nb_avail = 0;
557 rxq->rx_next_avail = 0;
558 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
562 rxq->pkt_first_seg = NULL;
563 rxq->pkt_last_seg = NULL;
565 rxq->rxrearm_start = 0;
570 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
572 struct ice_rx_queue *rxq;
574 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
576 PMD_INIT_FUNC_TRACE();
578 if (rx_queue_id >= dev->data->nb_rx_queues) {
579 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
580 rx_queue_id, dev->data->nb_rx_queues);
584 rxq = dev->data->rx_queues[rx_queue_id];
585 if (!rxq || !rxq->q_set) {
586 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
591 err = ice_program_hw_rx_queue(rxq);
593 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
598 err = ice_alloc_rx_queue_mbufs(rxq);
600 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
604 /* Init the RX tail register. */
605 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
607 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
609 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
612 rxq->rx_rel_mbufs(rxq);
613 ice_reset_rx_queue(rxq);
617 dev->data->rx_queue_state[rx_queue_id] =
618 RTE_ETH_QUEUE_STATE_STARTED;
624 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
626 struct ice_rx_queue *rxq;
628 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
630 if (rx_queue_id < dev->data->nb_rx_queues) {
631 rxq = dev->data->rx_queues[rx_queue_id];
633 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
635 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
639 rxq->rx_rel_mbufs(rxq);
640 ice_reset_rx_queue(rxq);
641 dev->data->rx_queue_state[rx_queue_id] =
642 RTE_ETH_QUEUE_STATE_STOPPED;
649 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
651 struct ice_tx_queue *txq;
655 struct ice_aqc_add_tx_qgrp *txq_elem;
656 struct ice_tlan_ctx tx_ctx;
659 PMD_INIT_FUNC_TRACE();
661 if (tx_queue_id >= dev->data->nb_tx_queues) {
662 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
663 tx_queue_id, dev->data->nb_tx_queues);
667 txq = dev->data->tx_queues[tx_queue_id];
668 if (!txq || !txq->q_set) {
669 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
674 buf_len = ice_struct_size(txq_elem, txqs, 1);
675 txq_elem = ice_malloc(hw, buf_len);
680 hw = ICE_VSI_TO_HW(vsi);
682 memset(&tx_ctx, 0, sizeof(tx_ctx));
683 txq_elem->num_txqs = 1;
684 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
686 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
687 tx_ctx.qlen = txq->nb_tx_desc;
688 tx_ctx.pf_num = hw->pf_id;
689 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
690 tx_ctx.src_vsi = vsi->vsi_id;
691 tx_ctx.port_num = hw->port_info->lport;
692 tx_ctx.tso_ena = 1; /* tso enable */
693 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
694 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
697 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
700 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
702 /* Init the Tx tail register */
703 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
705 /* FIXME: we assume TC is always 0 here */
706 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
707 txq_elem, buf_len, NULL);
709 PMD_DRV_LOG(ERR, "Failed to add lan txq");
713 /* store the schedule node id */
714 txq->q_teid = txq_elem->txqs[0].q_teid;
716 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
722 static enum ice_status
723 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
725 struct ice_vsi *vsi = rxq->vsi;
726 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
727 uint32_t rxdid = ICE_RXDID_LEGACY_1;
728 struct ice_rlan_ctx rx_ctx;
733 rxq->rx_buf_len = 1024;
735 memset(&rx_ctx, 0, sizeof(rx_ctx));
737 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
738 rx_ctx.qlen = rxq->nb_rx_desc;
739 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
740 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
741 rx_ctx.dtype = 0; /* No Header Split mode */
742 rx_ctx.dsize = 1; /* 32B descriptors */
743 rx_ctx.rxmax = ICE_ETH_MAX_LEN;
744 /* TPH: Transaction Layer Packet (TLP) processing hints */
745 rx_ctx.tphrdesc_ena = 1;
746 rx_ctx.tphwdesc_ena = 1;
747 rx_ctx.tphdata_ena = 1;
748 rx_ctx.tphhead_ena = 1;
749 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
750 * When the number of free descriptors goes below the lrxqthresh,
751 * an immediate interrupt is triggered.
753 rx_ctx.lrxqthresh = 2;
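/* i.e. the interrupt fires once fewer than 2 * 64 = 128 free
 * descriptors remain
 */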
754 /* default: use 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
757 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
759 /* Enable Flexible Descriptors in the queue context which
760 * allows this driver to select a specific receive descriptor format
762 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
763 QRXFLXP_CNTXT_RXDID_IDX_M;
765 /* increasing context priority to pick up profile ID;
766 * default is 0x01; setting to 0x03 to ensure profile
767 * is programmed if the previous context is of the same priority
769 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
770 QRXFLXP_CNTXT_RXDID_PRIO_M;
772 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
774 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
776 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
780 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
782 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
787 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
789 /* Init the Rx tail register */
790 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
796 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
798 struct ice_rx_queue *rxq;
800 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
801 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
803 PMD_INIT_FUNC_TRACE();
806 if (!rxq || !rxq->q_set) {
807 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
812 err = ice_fdir_program_hw_rx_queue(rxq);
814 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
819 /* Init the RX tail register. */
820 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
822 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
824 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
827 ice_reset_rx_queue(rxq);
835 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
837 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
838 struct ice_tx_queue *txq;
842 struct ice_aqc_add_tx_qgrp *txq_elem;
843 struct ice_tlan_ctx tx_ctx;
846 PMD_INIT_FUNC_TRACE();
849 if (!txq || !txq->q_set) {
850 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
855 buf_len = ice_struct_size(txq_elem, txqs, 1);
856 txq_elem = ice_malloc(hw, buf_len);
861 hw = ICE_VSI_TO_HW(vsi);
863 memset(&tx_ctx, 0, sizeof(tx_ctx));
864 txq_elem->num_txqs = 1;
865 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
867 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
868 tx_ctx.qlen = txq->nb_tx_desc;
869 tx_ctx.pf_num = hw->pf_id;
870 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
871 tx_ctx.src_vsi = vsi->vsi_id;
872 tx_ctx.port_num = hw->port_info->lport;
873 tx_ctx.tso_ena = 1; /* tso enable */
874 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
875 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
877 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
880 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
882 /* Init the Tx tail register */
883 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
885 /* FIXME: we assume TC is always 0 here */
886 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
887 txq_elem, buf_len, NULL);
889 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
893 /* store the schedule node id */
894 txq->q_teid = txq_elem->txqs[0].q_teid;
900 /* Free all mbufs for descriptors in tx queue */
902 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
906 if (!txq || !txq->sw_ring) {
907 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
911 for (i = 0; i < txq->nb_tx_desc; i++) {
912 if (txq->sw_ring[i].mbuf) {
913 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
914 txq->sw_ring[i].mbuf = NULL;
920 ice_reset_tx_queue(struct ice_tx_queue *txq)
922 struct ice_tx_entry *txe;
923 uint16_t i, prev, size;
926 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
931 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
932 for (i = 0; i < size; i++)
933 ((volatile char *)txq->tx_ring)[i] = 0;
935 prev = (uint16_t)(txq->nb_tx_desc - 1);
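/* Mark every descriptor as done and chain the software ring entries
 * into a circular list.
 */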
936 for (i = 0; i < txq->nb_tx_desc; i++) {
937 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
939 txd->cmd_type_offset_bsz =
940 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
943 txe[prev].next_id = i;
947 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
948 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
953 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
954 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
958 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
960 struct ice_tx_queue *txq;
961 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
962 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
963 struct ice_vsi *vsi = pf->main_vsi;
964 enum ice_status status;
967 uint16_t q_handle = tx_queue_id;
969 if (tx_queue_id >= dev->data->nb_tx_queues) {
970 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
971 tx_queue_id, dev->data->nb_tx_queues);
975 txq = dev->data->tx_queues[tx_queue_id];
977 PMD_DRV_LOG(ERR, "TX queue %u is not available",
982 q_ids[0] = txq->reg_idx;
983 q_teids[0] = txq->q_teid;
985 /* FIXME: we assume TC is always 0 here */
986 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
987 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
988 if (status != ICE_SUCCESS) {
989 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
993 txq->tx_rel_mbufs(txq);
994 ice_reset_tx_queue(txq);
995 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1001 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1003 struct ice_rx_queue *rxq;
1005 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1006 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1010 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1012 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1016 rxq->rx_rel_mbufs(rxq);
1022 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1024 struct ice_tx_queue *txq;
1025 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1026 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1027 struct ice_vsi *vsi = pf->main_vsi;
1028 enum ice_status status;
1030 uint32_t q_teids[1];
1031 uint16_t q_handle = tx_queue_id;
1035 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1041 q_ids[0] = txq->reg_idx;
1042 q_teids[0] = txq->q_teid;
1044 /* FIXME: we assume TC is always 0 here */
1045 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1046 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1047 if (status != ICE_SUCCESS) {
1048 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1052 txq->tx_rel_mbufs(txq);
1058 ice_rx_queue_setup(struct rte_eth_dev *dev,
1061 unsigned int socket_id,
1062 const struct rte_eth_rxconf *rx_conf,
1063 struct rte_mempool *mp)
1065 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1066 struct ice_adapter *ad =
1067 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1068 struct ice_vsi *vsi = pf->main_vsi;
1069 struct ice_rx_queue *rxq;
1070 const struct rte_memzone *rz;
1073 int use_def_burst_func = 1;
1076 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1077 nb_desc > ICE_MAX_RING_DESC ||
1078 nb_desc < ICE_MIN_RING_DESC) {
1079 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1080 "invalid", nb_desc);
1084 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1086 /* Free memory if needed */
1087 if (dev->data->rx_queues[queue_idx]) {
1088 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1089 dev->data->rx_queues[queue_idx] = NULL;
1092 /* Allocate the rx queue data structure */
1093 rxq = rte_zmalloc_socket(NULL,
1094 sizeof(struct ice_rx_queue),
1095 RTE_CACHE_LINE_SIZE,
1098 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1099 "rx queue data structure");
1103 rxq->nb_rx_desc = nb_desc;
1104 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1105 rxq->queue_id = queue_idx;
1106 rxq->offloads = offloads;
1108 rxq->reg_idx = vsi->base_queue + queue_idx;
1109 rxq->port_id = dev->data->port_id;
1110 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
1111 rxq->crc_len = RTE_ETHER_CRC_LEN;
1115 rxq->drop_en = rx_conf->rx_drop_en;
1117 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1118 rxq->proto_xtr = pf->proto_xtr != NULL ?
1119 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1121 /* Allocate the maximum number of RX ring hardware descriptors. */
1122 len = ICE_MAX_RING_DESC;
1125 * Allocate a little more memory because the vectorized/bulk_alloc Rx
1126 * functions don't check boundaries each time.
1128 len += ICE_RX_MAX_BURST;
1130 /* Allocate the maximum number of RX ring hardware descriptors. */
1131 ring_size = sizeof(union ice_rx_flex_desc) * len;
1132 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1133 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1134 ring_size, ICE_RING_BASE_ALIGN,
1137 ice_rx_queue_release(rxq);
1138 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1143 /* Zero all the descriptors in the ring. */
1144 memset(rz->addr, 0, ring_size);
1146 rxq->rx_ring_dma = rz->iova;
1147 rxq->rx_ring = rz->addr;
1149 /* always reserve more for bulk alloc */
1150 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1152 /* Allocate the software ring. */
1153 rxq->sw_ring = rte_zmalloc_socket(NULL,
1154 sizeof(struct ice_rx_entry) * len,
1155 RTE_CACHE_LINE_SIZE,
1157 if (!rxq->sw_ring) {
1158 ice_rx_queue_release(rxq);
1159 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1163 ice_reset_rx_queue(rxq);
1165 dev->data->rx_queues[queue_idx] = rxq;
1166 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1168 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1170 if (!use_def_burst_func) {
1171 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1172 "satisfied. Rx Burst Bulk Alloc function will be "
1173 "used on port=%d, queue=%d.",
1174 rxq->port_id, rxq->queue_id);
1176 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1177 "not satisfied, Scattered Rx is requested. "
1178 "on port=%d, queue=%d.",
1179 rxq->port_id, rxq->queue_id);
1180 ad->rx_bulk_alloc_allowed = false;
1187 ice_rx_queue_release(void *rxq)
1189 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1192 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1197 rte_free(q->sw_ring);
1198 rte_memzone_free(q->mz);
1203 ice_tx_queue_setup(struct rte_eth_dev *dev,
1206 unsigned int socket_id,
1207 const struct rte_eth_txconf *tx_conf)
1209 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1210 struct ice_vsi *vsi = pf->main_vsi;
1211 struct ice_tx_queue *txq;
1212 const struct rte_memzone *tz;
1214 uint16_t tx_rs_thresh, tx_free_thresh;
1217 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1219 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1220 nb_desc > ICE_MAX_RING_DESC ||
1221 nb_desc < ICE_MIN_RING_DESC) {
1222 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1223 "invalid", nb_desc);
1228 * The following two parameters control the setting of the RS bit on
1229 * transmit descriptors. TX descriptors will have their RS bit set
1230 * after txq->tx_rs_thresh descriptors have been used. The TX
1231 * descriptor ring will be cleaned after txq->tx_free_thresh
1232 * descriptors are used or if the number of descriptors required to
1233 * transmit a packet is greater than the number of free TX descriptors.
1235 * The following constraints must be satisfied:
1236 * - tx_rs_thresh must be greater than 0.
1237 * - tx_rs_thresh must be less than the size of the ring minus 2.
1238 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1239 * - tx_rs_thresh must be a divisor of the ring size.
1240 * - tx_free_thresh must be greater than 0.
1241 * - tx_free_thresh must be less than the size of the ring minus 3.
1242 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1244 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1245 * race condition, hence the maximum threshold constraints. When set
1246 * to zero use default values.
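*
* For example, with nb_desc = 1024, tx_rs_thresh = 32 and
* tx_free_thresh = 64, all of the constraints above are satisfied.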
1248 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1249 tx_conf->tx_free_thresh :
1250 ICE_DEFAULT_TX_FREE_THRESH);
1251 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1253 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1254 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1255 if (tx_conf->tx_rs_thresh)
1256 tx_rs_thresh = tx_conf->tx_rs_thresh;
1257 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1258 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1259 "exceed nb_desc. (tx_rs_thresh=%u "
1260 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1261 (unsigned int)tx_rs_thresh,
1262 (unsigned int)tx_free_thresh,
1263 (unsigned int)nb_desc,
1264 (int)dev->data->port_id,
1268 if (tx_rs_thresh >= (nb_desc - 2)) {
1269 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1270 "number of TX descriptors minus 2. "
1271 "(tx_rs_thresh=%u port=%d queue=%d)",
1272 (unsigned int)tx_rs_thresh,
1273 (int)dev->data->port_id,
1277 if (tx_free_thresh >= (nb_desc - 3)) {
1278 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1280 "number of TX descriptors minus 3. "
1281 "(tx_free_thresh=%u port=%d queue=%d)",
1282 (unsigned int)tx_free_thresh,
1283 (int)dev->data->port_id,
1287 if (tx_rs_thresh > tx_free_thresh) {
1288 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1289 "equal to tx_free_thresh. (tx_free_thresh=%u"
1290 " tx_rs_thresh=%u port=%d queue=%d)",
1291 (unsigned int)tx_free_thresh,
1292 (unsigned int)tx_rs_thresh,
1293 (int)dev->data->port_id,
1297 if ((nb_desc % tx_rs_thresh) != 0) {
1298 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1299 "number of TX descriptors. (tx_rs_thresh=%u"
1300 " port=%d queue=%d)",
1301 (unsigned int)tx_rs_thresh,
1302 (int)dev->data->port_id,
1306 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1307 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1308 "tx_rs_thresh is greater than 1. "
1309 "(tx_rs_thresh=%u port=%d queue=%d)",
1310 (unsigned int)tx_rs_thresh,
1311 (int)dev->data->port_id,
1316 /* Free memory if needed. */
1317 if (dev->data->tx_queues[queue_idx]) {
1318 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1319 dev->data->tx_queues[queue_idx] = NULL;
1322 /* Allocate the TX queue data structure. */
1323 txq = rte_zmalloc_socket(NULL,
1324 sizeof(struct ice_tx_queue),
1325 RTE_CACHE_LINE_SIZE,
1328 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1329 "tx queue structure");
1333 /* Allocate TX hardware ring descriptors. */
1334 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1335 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1336 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1337 ring_size, ICE_RING_BASE_ALIGN,
1340 ice_tx_queue_release(txq);
1341 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1346 txq->nb_tx_desc = nb_desc;
1347 txq->tx_rs_thresh = tx_rs_thresh;
1348 txq->tx_free_thresh = tx_free_thresh;
1349 txq->pthresh = tx_conf->tx_thresh.pthresh;
1350 txq->hthresh = tx_conf->tx_thresh.hthresh;
1351 txq->wthresh = tx_conf->tx_thresh.wthresh;
1352 txq->queue_id = queue_idx;
1354 txq->reg_idx = vsi->base_queue + queue_idx;
1355 txq->port_id = dev->data->port_id;
1356 txq->offloads = offloads;
1358 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1360 txq->tx_ring_dma = tz->iova;
1361 txq->tx_ring = tz->addr;
1363 /* Allocate software ring */
1365 rte_zmalloc_socket(NULL,
1366 sizeof(struct ice_tx_entry) * nb_desc,
1367 RTE_CACHE_LINE_SIZE,
1369 if (!txq->sw_ring) {
1370 ice_tx_queue_release(txq);
1371 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1375 ice_reset_tx_queue(txq);
1377 dev->data->tx_queues[queue_idx] = txq;
1378 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1379 ice_set_tx_function_flag(dev, txq);
1385 ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1387 ice_rx_queue_release(dev->data->rx_queues[qid]);
1391 ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1393 ice_tx_queue_release(dev->data->tx_queues[qid]);
1397 ice_tx_queue_release(void *txq)
1399 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1402 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1407 rte_free(q->sw_ring);
1408 rte_memzone_free(q->mz);
1413 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1414 struct rte_eth_rxq_info *qinfo)
1416 struct ice_rx_queue *rxq;
1418 rxq = dev->data->rx_queues[queue_id];
1420 qinfo->mp = rxq->mp;
1421 qinfo->scattered_rx = dev->data->scattered_rx;
1422 qinfo->nb_desc = rxq->nb_rx_desc;
1424 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1425 qinfo->conf.rx_drop_en = rxq->drop_en;
1426 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1430 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1431 struct rte_eth_txq_info *qinfo)
1433 struct ice_tx_queue *txq;
1435 txq = dev->data->tx_queues[queue_id];
1437 qinfo->nb_desc = txq->nb_tx_desc;
1439 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1440 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1441 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1443 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1444 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1445 qinfo->conf.offloads = txq->offloads;
1446 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1450 ice_rx_queue_count(void *rx_queue)
1452 #define ICE_RXQ_SCAN_INTERVAL 4
1453 volatile union ice_rx_flex_desc *rxdp;
1454 struct ice_rx_queue *rxq;
1458 rxdp = &rxq->rx_ring[rxq->rx_tail];
1459 while ((desc < rxq->nb_rx_desc) &&
1460 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1461 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1463 * Check the DD bit of every 4th Rx descriptor in the group,
1464 * to avoid checking too frequently and degrading performance
1467 desc += ICE_RXQ_SCAN_INTERVAL;
1468 rxdp += ICE_RXQ_SCAN_INTERVAL;
1469 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1470 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1471 desc - rxq->nb_rx_desc]);
1477 #define ICE_RX_FLEX_ERR0_BITS \
1478 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1479 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1480 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1481 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1482 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1483 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1485 /* Rx L3/L4 checksum */
1486 static inline uint64_t
1487 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1491 /* check whether HW has processed the packet's L3/L4 checksums */
1492 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1495 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1496 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1500 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1501 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1503 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1505 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1506 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1508 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1510 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1511 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1513 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1514 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
1516 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
1522 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1524 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1525 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1526 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1528 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1529 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1530 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1535 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1536 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1537 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1538 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
1539 RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
1540 mb->vlan_tci_outer = mb->vlan_tci;
1541 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1542 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1543 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1544 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1546 mb->vlan_tci_outer = 0;
1549 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1550 mb->vlan_tci, mb->vlan_tci_outer);
1553 #define ICE_LOOK_AHEAD 8
1554 #if (ICE_LOOK_AHEAD != 8)
1555 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1558 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1560 volatile union ice_rx_flex_desc *rxdp;
1561 struct ice_rx_entry *rxep;
1562 struct rte_mbuf *mb;
1565 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1566 int32_t i, j, nb_rx = 0;
1567 uint64_t pkt_flags = 0;
1568 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1569 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1570 struct ice_vsi *vsi = rxq->vsi;
1571 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1573 struct ice_adapter *ad = rxq->vsi->adapter;
1575 rxdp = &rxq->rx_ring[rxq->rx_tail];
1576 rxep = &rxq->sw_ring[rxq->rx_tail];
1578 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1580 /* Make sure there is at least 1 packet to receive */
1581 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1585 * Scan LOOK_AHEAD descriptors at a time to determine which
1586 * descriptors reference packets that are ready to be received.
1588 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1589 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1590 /* Read desc statuses backwards to avoid race condition */
1591 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1592 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1596 /* Compute how many status bits were set */
1597 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1598 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
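/* nb_dd now holds how many of these 8 descriptors the hardware has
 * completed (DD is bit 0, so the masked value sums directly)
 */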
1602 /* Translate descriptor info to mbuf parameters */
1603 for (j = 0; j < nb_dd; j++) {
1605 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1606 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1607 mb->data_len = pkt_len;
1608 mb->pkt_len = pkt_len;
1610 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1611 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1612 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1613 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1614 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1615 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
1616 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1617 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1618 ts_ns = ice_tstamp_convert_32b_64b(hw,
1619 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
1620 if (ice_timestamp_dynflag > 0) {
1621 *RTE_MBUF_DYNFIELD(mb,
1622 ice_timestamp_dynfield_offset,
1623 rte_mbuf_timestamp_t *) = ts_ns;
1624 mb->ol_flags |= ice_timestamp_dynflag;
1628 if (ad->ptp_ena && ((mb->packet_type &
1629 RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1631 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1632 mb->timesync = rxq->queue_id;
1633 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
1636 mb->ol_flags |= pkt_flags;
1639 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1640 rxq->rx_stage[i + j] = rxep[j].mbuf;
1642 if (nb_dd != ICE_LOOK_AHEAD)
1646 /* Clear software ring entries */
1647 for (i = 0; i < nb_rx; i++)
1648 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1650 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1651 "port_id=%u, queue_id=%u, nb_rx=%d",
1652 rxq->port_id, rxq->queue_id, nb_rx);
1657 static inline uint16_t
1658 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1659 struct rte_mbuf **rx_pkts,
1663 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1665 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1667 for (i = 0; i < nb_pkts; i++)
1668 rx_pkts[i] = stage[i];
1670 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1671 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1677 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1679 volatile union ice_rx_flex_desc *rxdp;
1680 struct ice_rx_entry *rxep;
1681 struct rte_mbuf *mb;
1682 uint16_t alloc_idx, i;
1686 /* Allocate buffers in bulk */
1687 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1688 (rxq->rx_free_thresh - 1));
1689 rxep = &rxq->sw_ring[alloc_idx];
1690 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1691 rxq->rx_free_thresh);
1692 if (unlikely(diag != 0)) {
1693 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1697 rxdp = &rxq->rx_ring[alloc_idx];
1698 for (i = 0; i < rxq->rx_free_thresh; i++) {
1699 if (likely(i < (rxq->rx_free_thresh - 1)))
1700 /* Prefetch next mbuf */
1701 rte_prefetch0(rxep[i + 1].mbuf);
1704 rte_mbuf_refcnt_set(mb, 1);
1706 mb->data_off = RTE_PKTMBUF_HEADROOM;
1708 mb->port = rxq->port_id;
1709 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1710 rxdp[i].read.hdr_addr = 0;
1711 rxdp[i].read.pkt_addr = dma_addr;
1714 /* Update Rx tail register */
1715 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1717 rxq->rx_free_trigger =
1718 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1719 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1720 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1725 static inline uint16_t
1726 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1728 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1734 if (rxq->rx_nb_avail)
1735 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1737 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1738 rxq->rx_next_avail = 0;
1739 rxq->rx_nb_avail = nb_rx;
1740 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
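/* refill the hardware ring once the tail has advanced past the
 * free trigger
 */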
1742 if (rxq->rx_tail > rxq->rx_free_trigger) {
1743 if (ice_rx_alloc_bufs(rxq) != 0) {
1746 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1747 rxq->rx_free_thresh;
1748 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1749 "port_id=%u, queue_id=%u",
1750 rxq->port_id, rxq->queue_id);
1751 rxq->rx_nb_avail = 0;
1752 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1753 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1754 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1760 if (rxq->rx_tail >= rxq->nb_rx_desc)
1763 if (rxq->rx_nb_avail)
1764 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1770 ice_recv_pkts_bulk_alloc(void *rx_queue,
1771 struct rte_mbuf **rx_pkts,
1778 if (unlikely(nb_pkts == 0))
1781 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1782 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1785 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1786 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1787 nb_rx = (uint16_t)(nb_rx + count);
1788 nb_pkts = (uint16_t)(nb_pkts - count);
1797 ice_recv_scattered_pkts(void *rx_queue,
1798 struct rte_mbuf **rx_pkts,
1801 struct ice_rx_queue *rxq = rx_queue;
1802 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1803 volatile union ice_rx_flex_desc *rxdp;
1804 union ice_rx_flex_desc rxd;
1805 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1806 struct ice_rx_entry *rxe;
1807 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1808 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1809 struct rte_mbuf *nmb; /* newly allocated mbuf */
1810 struct rte_mbuf *rxm; /* pointer to the old mbuf in the SW ring */
1811 uint16_t rx_id = rxq->rx_tail;
1813 uint16_t nb_hold = 0;
1814 uint16_t rx_packet_len;
1815 uint16_t rx_stat_err0;
1818 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1819 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1820 struct ice_vsi *vsi = rxq->vsi;
1821 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1823 struct ice_adapter *ad = rxq->vsi->adapter;
1825 while (nb_rx < nb_pkts) {
1826 rxdp = &rx_ring[rx_id];
1827 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1829 /* Check the DD bit first */
1830 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1834 nmb = rte_mbuf_raw_alloc(rxq->mp);
1835 if (unlikely(!nmb)) {
1836 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
1839 rxd = *rxdp; /* copy the ring descriptor to a temp variable */
1842 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1844 if (unlikely(rx_id == rxq->nb_rx_desc))
1847 /* Prefetch next mbuf */
1848 rte_prefetch0(sw_ring[rx_id].mbuf);
1851 * When the next RX descriptor is on a cache line boundary,
1852 * prefetch the next 4 RX descriptors and next 8 pointers
1855 if ((rx_id & 0x3) == 0) {
1856 rte_prefetch0(&rx_ring[rx_id]);
1857 rte_prefetch0(&sw_ring[rx_id]);
1863 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1865 /* Set data buffer address and data length of the mbuf */
1866 rxdp->read.hdr_addr = 0;
1867 rxdp->read.pkt_addr = dma_addr;
1868 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1869 ICE_RX_FLX_DESC_PKT_LEN_M;
1870 rxm->data_len = rx_packet_len;
1871 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1874 * If this is the first buffer of the received packet, set the
1875 * pointer to the first mbuf of the packet and initialize its
1876 * context. Otherwise, update the total length and the number
1877 * of segments of the current scattered packet, and update the
1878 * pointer to the last mbuf of the current packet.
1882 first_seg->nb_segs = 1;
1883 first_seg->pkt_len = rx_packet_len;
1885 first_seg->pkt_len =
1886 (uint16_t)(first_seg->pkt_len +
1888 first_seg->nb_segs++;
1889 last_seg->next = rxm;
1893 * If this is not the last buffer of the received packet,
1894 * update the pointer to the last mbuf of the current scattered
1895 * packet and continue to parse the RX ring.
1897 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1903 * This is the last buffer of the received packet. If the CRC
1904 * is not stripped by the hardware:
1905 * - Subtract the CRC length from the total packet length.
1906 * - If the last buffer only contains the whole CRC or a part
1907 * of it, free the mbuf associated to the last buffer. If part
1908 * of the CRC is also contained in the previous mbuf, subtract
1909 * the length of that CRC part from the data length of the
1913 if (unlikely(rxq->crc_len > 0)) {
1914 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1915 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1916 rte_pktmbuf_free_seg(rxm);
1917 first_seg->nb_segs--;
1918 last_seg->data_len =
1919 (uint16_t)(last_seg->data_len -
1920 (RTE_ETHER_CRC_LEN - rx_packet_len));
1921 last_seg->next = NULL;
1923 rxm->data_len = (uint16_t)(rx_packet_len -
1927 first_seg->port = rxq->port_id;
1928 first_seg->ol_flags = 0;
1929 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1930 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1931 ice_rxd_to_vlan_tci(first_seg, &rxd);
1932 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
1933 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1934 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1935 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1936 ts_ns = ice_tstamp_convert_32b_64b(hw,
1937 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1938 if (ice_timestamp_dynflag > 0) {
1939 *RTE_MBUF_DYNFIELD(first_seg,
1940 ice_timestamp_dynfield_offset,
1941 rte_mbuf_timestamp_t *) = ts_ns;
1942 first_seg->ol_flags |= ice_timestamp_dynflag;
1946 if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
1947 == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1949 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
1950 first_seg->timesync = rxq->queue_id;
1951 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
1954 first_seg->ol_flags |= pkt_flags;
1955 /* Prefetch data of first segment, if configured to do so. */
1956 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1957 first_seg->data_off));
1958 rx_pkts[nb_rx++] = first_seg;
1962 /* Record index of the next RX descriptor to probe. */
1963 rxq->rx_tail = rx_id;
1964 rxq->pkt_first_seg = first_seg;
1965 rxq->pkt_last_seg = last_seg;
1968 * If the number of free RX descriptors is greater than the RX free
1969 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1970 * register. Update the RDT with the value of the last processed RX
1971 * descriptor minus 1, to guarantee that the RDT register is never
1972 * equal to the RDH register, which creates a "full" ring situation
1973 * from the hardware point of view.
1975 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1976 if (nb_hold > rxq->rx_free_thresh) {
1977 rx_id = (uint16_t)(rx_id == 0 ?
1978 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1979 /* write TAIL register */
1980 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1983 rxq->nb_rx_hold = nb_hold;
1985 /* return the received packets in the burst */
1990 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1992 struct ice_adapter *ad =
1993 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1994 const uint32_t *ptypes;
1996 static const uint32_t ptypes_os[] = {
1997 /* refers to ice_get_default_pkt_type() */
1999 RTE_PTYPE_L2_ETHER_TIMESYNC,
2000 RTE_PTYPE_L2_ETHER_LLDP,
2001 RTE_PTYPE_L2_ETHER_ARP,
2002 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2003 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2006 RTE_PTYPE_L4_NONFRAG,
2010 RTE_PTYPE_TUNNEL_GRENAT,
2011 RTE_PTYPE_TUNNEL_IP,
2012 RTE_PTYPE_INNER_L2_ETHER,
2013 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2014 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2015 RTE_PTYPE_INNER_L4_FRAG,
2016 RTE_PTYPE_INNER_L4_ICMP,
2017 RTE_PTYPE_INNER_L4_NONFRAG,
2018 RTE_PTYPE_INNER_L4_SCTP,
2019 RTE_PTYPE_INNER_L4_TCP,
2020 RTE_PTYPE_INNER_L4_UDP,
2024 static const uint32_t ptypes_comms[] = {
2025 /* refers to ice_get_default_pkt_type() */
2027 RTE_PTYPE_L2_ETHER_TIMESYNC,
2028 RTE_PTYPE_L2_ETHER_LLDP,
2029 RTE_PTYPE_L2_ETHER_ARP,
2030 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2031 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2034 RTE_PTYPE_L4_NONFRAG,
2038 RTE_PTYPE_TUNNEL_GRENAT,
2039 RTE_PTYPE_TUNNEL_IP,
2040 RTE_PTYPE_INNER_L2_ETHER,
2041 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2042 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2043 RTE_PTYPE_INNER_L4_FRAG,
2044 RTE_PTYPE_INNER_L4_ICMP,
2045 RTE_PTYPE_INNER_L4_NONFRAG,
2046 RTE_PTYPE_INNER_L4_SCTP,
2047 RTE_PTYPE_INNER_L4_TCP,
2048 RTE_PTYPE_INNER_L4_UDP,
2049 RTE_PTYPE_TUNNEL_GTPC,
2050 RTE_PTYPE_TUNNEL_GTPU,
2051 RTE_PTYPE_L2_ETHER_PPPOE,
2055 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
2056 ptypes = ptypes_comms;
2060 if (dev->rx_pkt_burst == ice_recv_pkts ||
2061 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2062 dev->rx_pkt_burst == ice_recv_scattered_pkts)
2066 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2067 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2068 #ifdef CC_AVX512_SUPPORT
2069 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2070 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2071 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2072 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2074 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2075 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2076 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2077 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2085 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2087 volatile union ice_rx_flex_desc *rxdp;
2088 struct ice_rx_queue *rxq = rx_queue;
2091 if (unlikely(offset >= rxq->nb_rx_desc))
2094 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2095 return RTE_ETH_RX_DESC_UNAVAIL;
2097 desc = rxq->rx_tail + offset;
2098 if (desc >= rxq->nb_rx_desc)
2099 desc -= rxq->nb_rx_desc;
2101 rxdp = &rxq->rx_ring[desc];
2102 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2103 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2104 return RTE_ETH_RX_DESC_DONE;
2106 return RTE_ETH_RX_DESC_AVAIL;
2110 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2112 struct ice_tx_queue *txq = tx_queue;
2113 volatile uint64_t *status;
2114 uint64_t mask, expect;
2117 if (unlikely(offset >= txq->nb_tx_desc))
2120 desc = txq->tx_tail + offset;
2121 /* go to next desc that has the RS bit */
2122 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2124 if (desc >= txq->nb_tx_desc) {
2125 desc -= txq->nb_tx_desc;
2126 if (desc >= txq->nb_tx_desc)
2127 desc -= txq->nb_tx_desc;
2130 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2131 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2132 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2133 ICE_TXD_QW1_DTYPE_S);
2134 if ((*status & mask) == expect)
2135 return RTE_ETH_TX_DESC_DONE;
2137 return RTE_ETH_TX_DESC_FULL;
2141 ice_free_queues(struct rte_eth_dev *dev)
2145 PMD_INIT_FUNC_TRACE();
2147 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2148 if (!dev->data->rx_queues[i])
2150 ice_rx_queue_release(dev->data->rx_queues[i]);
2151 dev->data->rx_queues[i] = NULL;
2153 dev->data->nb_rx_queues = 0;
2155 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2156 if (!dev->data->tx_queues[i])
2158 ice_tx_queue_release(dev->data->tx_queues[i]);
2159 dev->data->tx_queues[i] = NULL;
2161 dev->data->nb_tx_queues = 0;
2164 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2165 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
2168 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2170 struct ice_tx_queue *txq;
2171 const struct rte_memzone *tz = NULL;
2173 struct rte_eth_dev *dev;
2176 PMD_DRV_LOG(ERR, "PF is not available");
2180 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2182 /* Allocate the TX queue data structure. */
2183 txq = rte_zmalloc_socket("ice fdir tx queue",
2184 sizeof(struct ice_tx_queue),
2185 RTE_CACHE_LINE_SIZE,
2188 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2189 "tx queue structure.");
2193 /* Allocate TX hardware ring descriptors. */
2194 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2195 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2197 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2198 ICE_FDIR_QUEUE_ID, ring_size,
2199 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2201 ice_tx_queue_release(txq);
2202 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2207 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2208 txq->queue_id = ICE_FDIR_QUEUE_ID;
2209 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2210 txq->vsi = pf->fdir.fdir_vsi;
2212 txq->tx_ring_dma = tz->iova;
2213 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2215 * No need to allocate a software ring or reset the fdir program
2216 * queue; just mark the queue as configured.
2221 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2227 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2229 struct ice_rx_queue *rxq;
2230 const struct rte_memzone *rz = NULL;
2232 struct rte_eth_dev *dev;
2235 PMD_DRV_LOG(ERR, "PF is not available");
2239 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2241 /* Allocate the RX queue data structure. */
2242 rxq = rte_zmalloc_socket("ice fdir rx queue",
2243 sizeof(struct ice_rx_queue),
2244 RTE_CACHE_LINE_SIZE,
2247 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2248 "rx queue structure.");
2252 /* Allocate RX hardware ring descriptors. */
2253 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2254 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2256 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2257 ICE_FDIR_QUEUE_ID, ring_size,
2258 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2260 ice_rx_queue_release(rxq);
2261 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2266 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2267 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2268 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2269 rxq->vsi = pf->fdir.fdir_vsi;
2271 rxq->rx_ring_dma = rz->iova;
2272 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2273 sizeof(union ice_32byte_rx_desc));
2274 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2277 * No need to allocate a software ring or perform a reset for the FDIR
2278 * Rx queue; just mark the queue as configured.
2283 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2289 ice_recv_pkts(void *rx_queue,
2290 struct rte_mbuf **rx_pkts,
2293 struct ice_rx_queue *rxq = rx_queue;
2294 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2295 volatile union ice_rx_flex_desc *rxdp;
2296 union ice_rx_flex_desc rxd;
2297 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2298 struct ice_rx_entry *rxe;
2299 struct rte_mbuf *nmb; /* new allocated mbuf */
2300 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2301 uint16_t rx_id = rxq->rx_tail;
2303 uint16_t nb_hold = 0;
2304 uint16_t rx_packet_len;
2305 uint16_t rx_stat_err0;
2308 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2309 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2310 struct ice_vsi *vsi = rxq->vsi;
2311 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2313 struct ice_adapter *ad = rxq->vsi->adapter;
2315 while (nb_rx < nb_pkts) {
2316 rxdp = &rx_ring[rx_id];
2317 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2319 /* Check the DD bit first */
2320 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2324 nmb = rte_mbuf_raw_alloc(rxq->mp);
2325 if (unlikely(!nmb)) {
2326 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2329 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2332 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2334 if (unlikely(rx_id == rxq->nb_rx_desc))
2339 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2342 * fill the read format of the descriptor with the physical address of
2343 * the newly allocated mbuf: nmb
2345 rxdp->read.hdr_addr = 0;
2346 rxdp->read.pkt_addr = dma_addr;
2348 /* calculate rx_packet_len of the received pkt */
2349 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2350 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2352 /* fill the old mbuf (rxm) using the received descriptor: rxd */
2353 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2354 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2357 rxm->pkt_len = rx_packet_len;
2358 rxm->data_len = rx_packet_len;
2359 rxm->port = rxq->port_id;
2360 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2361 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2362 ice_rxd_to_vlan_tci(rxm, &rxd);
2363 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
2364 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2365 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2366 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
2367 ts_ns = ice_tstamp_convert_32b_64b(hw,
2368 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
2369 if (ice_timestamp_dynflag > 0) {
2370 *RTE_MBUF_DYNFIELD(rxm,
2371 ice_timestamp_dynfield_offset,
2372 rte_mbuf_timestamp_t *) = ts_ns;
2373 rxm->ol_flags |= ice_timestamp_dynflag;
2377 if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
2378 RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2380 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2381 rxm->timesync = rxq->queue_id;
2382 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2385 rxm->ol_flags |= pkt_flags;
2386 /* copy old mbuf to rx_pkts */
2387 rx_pkts[nb_rx++] = rxm;
2389 rxq->rx_tail = rx_id;
2391 * If the number of free RX descriptors is greater than the RX free
2392 * threshold of the queue, advance the receive tail register of the queue.
2393 * Update that register with the value of the last processed RX
2394 * descriptor minus 1.
2396 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2397 if (nb_hold > rxq->rx_free_thresh) {
2398 rx_id = (uint16_t)(rx_id == 0 ?
2399 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2400 /* write TAIL register */
2401 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2404 rxq->nb_rx_hold = nb_hold;
2406 /* return the number of packets received in the burst */
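/*
 * Usage sketch (illustrative only, not part of the driver): applications do
 * not call ice_recv_pkts() directly; it is installed as dev->rx_pkt_burst and
 * reached through rte_eth_rx_burst(). The port/queue ids and burst size below
 * are arbitrary example values.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	for (uint16_t k = 0; k < nb; k++)
 *		rte_pktmbuf_free(pkts[k]);
 */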
2411 ice_parse_tunneling_params(uint64_t ol_flags,
2412 union ice_tx_offload tx_offload,
2413 uint32_t *cd_tunneling)
2415 /* EIPT: External (outer) IP header type */
2416 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
2417 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2418 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
2419 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2420 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
2421 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2423 /* EIPLEN: External (outer) IP header length, in DWords */
2424 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2425 ICE_TXD_CTX_QW0_EIPLEN_S;
2427 /* L4TUNT: L4 Tunneling Type */
2428 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2429 case RTE_MBUF_F_TX_TUNNEL_IPIP:
2430 /* for tunneling that is neither UDP nor GRE based, set to 00b */
2432 case RTE_MBUF_F_TX_TUNNEL_VXLAN:
2433 case RTE_MBUF_F_TX_TUNNEL_GTP:
2434 case RTE_MBUF_F_TX_TUNNEL_GENEVE:
2435 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2437 case RTE_MBUF_F_TX_TUNNEL_GRE:
2438 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2441 PMD_TX_LOG(ERR, "Tunnel type not supported");
2445 /* L4TUNLEN: L4 Tunneling Length, in Words
2447 * We depend on the application to set rte_mbuf.l2_len correctly.
2448 * For IP in GRE it should be set to the length of the GRE header.
2450 * For MAC in GRE or MAC in UDP it should be set to the length
2451 * of the GRE or UDP headers plus the inner MAC up to and including
2452 * its last Ethertype.
2453 * If MPLS labels exist, they should be included as well.
2455 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2456 ICE_TXD_CTX_QW0_NATLEN_S;
2459 * Calculate the tunneling UDP checksum.
2460 * Shall be set only if L4TUNT = 01b and EIPT is not zero
2462 if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2463 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2464 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
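/*
 * Illustrative example (editorial assumption based on the generic mbuf Tx
 * offload conventions, not taken from this file): for a VXLAN-encapsulated
 * TCP/IPv4 packet with outer IPv4 checksum offload, the application is
 * expected to set the mbuf roughly as follows before transmit:
 *
 *	m->outer_l2_len = sizeof(struct rte_ether_hdr);
 *	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l2_len = sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr) +
 *		    sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
 *		       RTE_MBUF_F_TX_TUNNEL_VXLAN | RTE_MBUF_F_TX_IPV4 |
 *		       RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM;
 */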
2468 ice_txd_enable_checksum(uint64_t ol_flags,
2470 uint32_t *td_offset,
2471 union ice_tx_offload tx_offload)
2474 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2475 *td_offset |= (tx_offload.outer_l2_len >> 1)
2476 << ICE_TX_DESC_LEN_MACLEN_S;
2478 *td_offset |= (tx_offload.l2_len >> 1)
2479 << ICE_TX_DESC_LEN_MACLEN_S;
2481 /* Enable L3 checksum offloads */
2482 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2483 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2484 *td_offset |= (tx_offload.l3_len >> 2) <<
2485 ICE_TX_DESC_LEN_IPLEN_S;
2486 } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2487 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2488 *td_offset |= (tx_offload.l3_len >> 2) <<
2489 ICE_TX_DESC_LEN_IPLEN_S;
2490 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2491 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2492 *td_offset |= (tx_offload.l3_len >> 2) <<
2493 ICE_TX_DESC_LEN_IPLEN_S;
2496 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2497 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2498 *td_offset |= (tx_offload.l4_len >> 2) <<
2499 ICE_TX_DESC_LEN_L4_LEN_S;
2503 /* Enable L4 checksum offloads */
2504 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2505 case RTE_MBUF_F_TX_TCP_CKSUM:
2506 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2507 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2508 ICE_TX_DESC_LEN_L4_LEN_S;
2510 case RTE_MBUF_F_TX_SCTP_CKSUM:
2511 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2512 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2513 ICE_TX_DESC_LEN_L4_LEN_S;
2515 case RTE_MBUF_F_TX_UDP_CKSUM:
2516 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2517 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2518 ICE_TX_DESC_LEN_L4_LEN_S;
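/*
 * Worked example (editorial, not part of the original code): for a plain
 * TCP/IPv4 frame with l2_len = 14 and l3_len = 20, the encodings above yield
 * MACLEN = 14 >> 1 = 7 (words), IPLEN = 20 >> 2 = 5 (dwords) and
 * L4_LEN = sizeof(struct rte_tcp_hdr) >> 2 = 5 (dwords).
 */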
2526 ice_xmit_cleanup(struct ice_tx_queue *txq)
2528 struct ice_tx_entry *sw_ring = txq->sw_ring;
2529 volatile struct ice_tx_desc *txd = txq->tx_ring;
2530 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2531 uint16_t nb_tx_desc = txq->nb_tx_desc;
2532 uint16_t desc_to_clean_to;
2533 uint16_t nb_tx_to_clean;
2535 /* Determine the last descriptor needing to be cleaned */
2536 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2537 if (desc_to_clean_to >= nb_tx_desc)
2538 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2540 /* Check to make sure the last descriptor to clean is done */
2541 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2542 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2543 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2544 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2545 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2547 txq->port_id, txq->queue_id,
2548 txd[desc_to_clean_to].cmd_type_offset_bsz);
2549 /* Failed to clean any descriptors */
2553 /* Figure out how many descriptors will be cleaned */
2554 if (last_desc_cleaned > desc_to_clean_to)
2555 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2558 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2561 /* The last descriptor to clean is done, so that means all the
2562 * descriptors from the last descriptor that was cleaned
2563 * up to the last descriptor with the RS bit set
2564 * are done. Only reset the threshold descriptor.
2566 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2568 /* Update the txq to reflect the last descriptor that was cleaned */
2569 txq->last_desc_cleaned = desc_to_clean_to;
2570 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2575 /* Construct the tx flags */
2576 static inline uint64_t
2577 ice_build_ctob(uint32_t td_cmd,
2582 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2583 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2584 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2585 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2586 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2589 /* Check if the context descriptor is needed for TX offloading */
2590 static inline uint16_t
2591 ice_calc_context_desc(uint64_t flags)
2593 static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
2594 RTE_MBUF_F_TX_QINQ |
2595 RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2596 RTE_MBUF_F_TX_TUNNEL_MASK |
2597 RTE_MBUF_F_TX_IEEE1588_TMST;
2599 return (flags & mask) ? 1 : 0;
2602 /* set ice TSO context descriptor */
2603 static inline uint64_t
2604 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2606 uint64_t ctx_desc = 0;
2607 uint32_t cd_cmd, hdr_len, cd_tso_len;
2609 if (!tx_offload.l4_len) {
2610 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2614 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2615 hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
2616 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2618 cd_cmd = ICE_TX_CTX_DESC_TSO;
2619 cd_tso_len = mbuf->pkt_len - hdr_len;
2620 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2621 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2622 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
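/*
 * Worked example (editorial, not part of the original code): for an
 * untunneled TCP/IPv4 mbuf with pkt_len = 9014, l2_len = 14, l3_len = 20 and
 * l4_len = 20, hdr_len = 54 and cd_tso_len = 9014 - 54 = 8960 bytes of
 * payload are left for the hardware to segment into tso_segsz-sized chunks.
 */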
2627 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2628 #define ICE_MAX_DATA_PER_TXD \
2629 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2630 /* Calculate the number of TX descriptors needed for each pkt */
2631 static inline uint16_t
2632 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2634 struct rte_mbuf *txd = tx_pkt;
2637 while (txd != NULL) {
2638 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
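/*
 * Illustrative note (editorial): ICE_MAX_DATA_PER_TXD is (16K - 1) = 16383
 * bytes, so a single 40000-byte TSO segment contributes
 * DIV_ROUND_UP(40000, 16383) = 3 data descriptors to the count.
 */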
2646 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2648 struct ice_tx_queue *txq;
2649 volatile struct ice_tx_desc *tx_ring;
2650 volatile struct ice_tx_desc *txd;
2651 struct ice_tx_entry *sw_ring;
2652 struct ice_tx_entry *txe, *txn;
2653 struct rte_mbuf *tx_pkt;
2654 struct rte_mbuf *m_seg;
2655 uint32_t cd_tunneling_params;
2660 uint32_t td_cmd = 0;
2661 uint32_t td_offset = 0;
2662 uint32_t td_tag = 0;
2665 uint64_t buf_dma_addr;
2667 union ice_tx_offload tx_offload = {0};
2670 sw_ring = txq->sw_ring;
2671 tx_ring = txq->tx_ring;
2672 tx_id = txq->tx_tail;
2673 txe = &sw_ring[tx_id];
2675 /* Check if the descriptor ring needs to be cleaned. */
2676 if (txq->nb_tx_free < txq->tx_free_thresh)
2677 (void)ice_xmit_cleanup(txq);
2679 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2680 tx_pkt = *tx_pkts++;
2685 ol_flags = tx_pkt->ol_flags;
2686 tx_offload.l2_len = tx_pkt->l2_len;
2687 tx_offload.l3_len = tx_pkt->l3_len;
2688 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2689 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2690 tx_offload.l4_len = tx_pkt->l4_len;
2691 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2692 /* Calculate the number of context descriptors needed. */
2693 nb_ctx = ice_calc_context_desc(ol_flags);
2695 /* The number of descriptors that must be allocated for
2696 * a packet equals the number of segments of that
2697 * packet plus one context descriptor, if needed.
2698 * Recalculate the number of needed Tx descriptors when TSO is enabled,
2699 * in case the mbuf data size exceeds the maximum data size the HW allows
2702 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
2703 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2706 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2707 tx_last = (uint16_t)(tx_id + nb_used - 1);
2710 if (tx_last >= txq->nb_tx_desc)
2711 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2713 if (nb_used > txq->nb_tx_free) {
2714 if (ice_xmit_cleanup(txq) != 0) {
2719 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2720 while (nb_used > txq->nb_tx_free) {
2721 if (ice_xmit_cleanup(txq) != 0) {
2730 /* Descriptor based VLAN insertion */
2731 if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
2732 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2733 td_tag = tx_pkt->vlan_tci;
2736 /* Fill in tunneling parameters if necessary */
2737 cd_tunneling_params = 0;
2738 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2739 ice_parse_tunneling_params(ol_flags, tx_offload,
2740 &cd_tunneling_params);
2742 /* Enable checksum offloading */
2743 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2744 ice_txd_enable_checksum(ol_flags, &td_cmd,
2745 &td_offset, tx_offload);
2748 /* Setup TX context descriptor if required */
2749 volatile struct ice_tx_ctx_desc *ctx_txd =
2750 (volatile struct ice_tx_ctx_desc *)
2752 uint16_t cd_l2tag2 = 0;
2753 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2755 txn = &sw_ring[txe->next_id];
2756 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2758 rte_pktmbuf_free_seg(txe->mbuf);
2762 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
2763 cd_type_cmd_tso_mss |=
2764 ice_set_tso_ctx(tx_pkt, tx_offload);
2765 else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
2766 cd_type_cmd_tso_mss |=
2767 ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
2768 ICE_TXD_CTX_QW1_CMD_S);
2770 ctx_txd->tunneling_params =
2771 rte_cpu_to_le_32(cd_tunneling_params);
2773 /* TX context descriptor based double VLAN insert */
2774 if (ol_flags & RTE_MBUF_F_TX_QINQ) {
2775 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2776 cd_type_cmd_tso_mss |=
2777 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2778 ICE_TXD_CTX_QW1_CMD_S);
2780 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2782 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2784 txe->last_id = tx_last;
2785 tx_id = txe->next_id;
2791 txd = &tx_ring[tx_id];
2792 txn = &sw_ring[txe->next_id];
2795 rte_pktmbuf_free_seg(txe->mbuf);
2798 /* Setup TX Descriptor */
2799 slen = m_seg->data_len;
2800 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2802 while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
2803 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2804 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2805 txd->cmd_type_offset_bsz =
2806 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2807 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2808 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2809 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2810 ICE_TXD_QW1_TX_BUF_SZ_S) |
2811 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2813 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2814 slen -= ICE_MAX_DATA_PER_TXD;
2816 txe->last_id = tx_last;
2817 tx_id = txe->next_id;
2819 txd = &tx_ring[tx_id];
2820 txn = &sw_ring[txe->next_id];
2823 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2824 txd->cmd_type_offset_bsz =
2825 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2826 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2827 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2828 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2829 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2831 txe->last_id = tx_last;
2832 tx_id = txe->next_id;
2834 m_seg = m_seg->next;
2837 /* fill the last descriptor with End of Packet (EOP) bit */
2838 td_cmd |= ICE_TX_DESC_CMD_EOP;
2839 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2840 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2842 /* set RS bit on the last descriptor of one packet */
2843 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2845 "Setting RS bit on TXD id="
2846 "%4u (port=%d queue=%d)",
2847 tx_last, txq->port_id, txq->queue_id);
2849 td_cmd |= ICE_TX_DESC_CMD_RS;
2851 /* Update txq RS bit counters */
2852 txq->nb_tx_used = 0;
2854 txd->cmd_type_offset_bsz |=
2855 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2859 /* update Tail register */
2860 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2861 txq->tx_tail = tx_id;
2866 static __rte_always_inline int
2867 ice_tx_free_bufs(struct ice_tx_queue *txq)
2869 struct ice_tx_entry *txep;
2872 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2873 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2874 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2877 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2879 for (i = 0; i < txq->tx_rs_thresh; i++)
2880 rte_prefetch0((txep + i)->mbuf);
2882 if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
2883 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2884 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2888 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2889 rte_pktmbuf_free_seg(txep->mbuf);
2894 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2895 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2896 if (txq->tx_next_dd >= txq->nb_tx_desc)
2897 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2899 return txq->tx_rs_thresh;
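/*
 * Note (editorial, not from the original source): the fast-free branch above
 * relies on the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE contract (all mbufs on the
 * queue come from one mempool and have a reference count of one), which is
 * why they can be returned with rte_mempool_put() instead of
 * rte_pktmbuf_free_seg().
 */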
2903 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2906 struct ice_tx_entry *swr_ring = txq->sw_ring;
2907 uint16_t i, tx_last, tx_id;
2908 uint16_t nb_tx_free_last;
2909 uint16_t nb_tx_to_clean;
2912 /* Start freeing mbufs from the entry following tx_tail */
2913 tx_last = txq->tx_tail;
2914 tx_id = swr_ring[tx_last].next_id;
2916 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2919 nb_tx_to_clean = txq->nb_tx_free;
2920 nb_tx_free_last = txq->nb_tx_free;
2922 free_cnt = txq->nb_tx_desc;
2924 /* Loop through swr_ring to count the number of
2925 * freeable mbufs and packets.
2927 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2928 for (i = 0; i < nb_tx_to_clean &&
2929 pkt_cnt < free_cnt &&
2930 tx_id != tx_last; i++) {
2931 if (swr_ring[tx_id].mbuf != NULL) {
2932 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2933 swr_ring[tx_id].mbuf = NULL;
2936 * if this is the last segment in the packet,
2937 * increment the packet count
2939 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2942 tx_id = swr_ring[tx_id].next_id;
2945 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2946 txq->nb_tx_free || tx_id == tx_last)
2949 if (pkt_cnt < free_cnt) {
2950 if (ice_xmit_cleanup(txq))
2953 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2954 nb_tx_free_last = txq->nb_tx_free;
2958 return (int)pkt_cnt;
2963 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2964 uint32_t free_cnt __rte_unused)
2971 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2976 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2977 free_cnt = txq->nb_tx_desc;
2979 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2981 for (i = 0; i < cnt; i += n) {
2982 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2985 n = ice_tx_free_bufs(txq);
2995 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2997 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2998 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2999 struct ice_adapter *ad =
3000 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3003 if (ad->tx_vec_allowed)
3004 return ice_tx_done_cleanup_vec(q, free_cnt);
3006 if (ad->tx_simple_allowed)
3007 return ice_tx_done_cleanup_simple(q, free_cnt);
3009 return ice_tx_done_cleanup_full(q, free_cnt);
3012 /* Populate 4 descriptors with data from 4 mbufs */
3014 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3019 for (i = 0; i < 4; i++, txdp++, pkts++) {
3020 dma_addr = rte_mbuf_data_iova(*pkts);
3021 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3022 txdp->cmd_type_offset_bsz =
3023 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3024 (*pkts)->data_len, 0);
3028 /* Populate 1 descriptor with data from 1 mbuf */
3030 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3034 dma_addr = rte_mbuf_data_iova(*pkts);
3035 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3036 txdp->cmd_type_offset_bsz =
3037 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3038 (*pkts)->data_len, 0);
3042 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
3045 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
3046 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3047 const int N_PER_LOOP = 4;
3048 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3049 int mainpart, leftover;
3053 * Process most of the packets in chunks of N pkts. Any
3054 * leftover packets will get processed one at a time.
3056 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3057 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
3058 for (i = 0; i < mainpart; i += N_PER_LOOP) {
3059 /* Copy N mbuf pointers to the S/W ring */
3060 for (j = 0; j < N_PER_LOOP; ++j)
3061 (txep + i + j)->mbuf = *(pkts + i + j);
3062 tx4(txdp + i, pkts + i);
3065 if (unlikely(leftover > 0)) {
3066 for (i = 0; i < leftover; ++i) {
3067 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3068 tx1(txdp + mainpart + i, pkts + mainpart + i);
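/*
 * Illustrative note (editorial): with nb_pkts = 13 the masks above give
 * mainpart = 12 (three tx4() iterations) and leftover = 1 (one trailing
 * tx1() call).
 */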
3073 static inline uint16_t
3074 tx_xmit_pkts(struct ice_tx_queue *txq,
3075 struct rte_mbuf **tx_pkts,
3078 volatile struct ice_tx_desc *txr = txq->tx_ring;
3082 * Begin scanning the H/W ring for done descriptors when the number
3083 * of available descriptors drops below tx_free_thresh. For each done
3084 * descriptor, free the associated buffer.
3086 if (txq->nb_tx_free < txq->tx_free_thresh)
3087 ice_tx_free_bufs(txq);
3089 /* Use available descriptors only */
3090 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3091 if (unlikely(!nb_pkts))
3094 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
3095 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3096 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3097 ice_tx_fill_hw_ring(txq, tx_pkts, n);
3098 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3099 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3101 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3105 /* Fill hardware descriptor ring with mbuf data */
3106 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3107 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3109 /* Determine whether the RS bit needs to be set */
3110 if (txq->tx_tail > txq->tx_next_rs) {
3111 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3112 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3115 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3116 if (txq->tx_next_rs >= txq->nb_tx_desc)
3117 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3120 if (txq->tx_tail >= txq->nb_tx_desc)
3123 /* Update the tx tail register */
3124 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3130 ice_xmit_pkts_simple(void *tx_queue,
3131 struct rte_mbuf **tx_pkts,
3136 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3137 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3141 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3144 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3145 &tx_pkts[nb_tx], num);
3146 nb_tx = (uint16_t)(nb_tx + ret);
3147 nb_pkts = (uint16_t)(nb_pkts - ret);
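/*
 * Note (editorial summary): bursts larger than ICE_TX_MAX_BURST are split
 * into ICE_TX_MAX_BURST-sized chunks and handed to tx_xmit_pkts() one chunk
 * at a time; transmission stops early once a chunk is only partially
 * accepted because the ring ran out of free descriptors.
 */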
3156 ice_set_rx_function(struct rte_eth_dev *dev)
3158 PMD_INIT_FUNC_TRACE();
3159 struct ice_adapter *ad =
3160 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3162 struct ice_rx_queue *rxq;
3164 int rx_check_ret = -1;
3166 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3167 ad->rx_use_avx512 = false;
3168 ad->rx_use_avx2 = false;
3169 rx_check_ret = ice_rx_vec_dev_check(dev);
3172 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3173 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3174 ad->rx_vec_allowed = true;
3175 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3176 rxq = dev->data->rx_queues[i];
3177 if (rxq && ice_rxq_vec_setup(rxq)) {
3178 ad->rx_vec_allowed = false;
3183 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3184 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3185 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3186 #ifdef CC_AVX512_SUPPORT
3187 ad->rx_use_avx512 = true;
3190 "AVX512 is not supported in build env");
3192 if (!ad->rx_use_avx512 &&
3193 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3194 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3195 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3196 ad->rx_use_avx2 = true;
3199 ad->rx_vec_allowed = false;
3203 if (ad->rx_vec_allowed) {
3204 if (dev->data->scattered_rx) {
3205 if (ad->rx_use_avx512) {
3206 #ifdef CC_AVX512_SUPPORT
3207 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3209 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3210 dev->data->port_id);
3212 ice_recv_scattered_pkts_vec_avx512_offload;
3215 "Using AVX512 Vector Scattered Rx (port %d).",
3216 dev->data->port_id);
3218 ice_recv_scattered_pkts_vec_avx512;
3221 } else if (ad->rx_use_avx2) {
3222 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3224 "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3225 dev->data->port_id);
3227 ice_recv_scattered_pkts_vec_avx2_offload;
3230 "Using AVX2 Vector Scattered Rx (port %d).",
3231 dev->data->port_id);
3233 ice_recv_scattered_pkts_vec_avx2;
3237 "Using Vector Scattered Rx (port %d).",
3238 dev->data->port_id);
3239 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3242 if (ad->rx_use_avx512) {
3243 #ifdef CC_AVX512_SUPPORT
3244 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3246 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3247 dev->data->port_id);
3249 ice_recv_pkts_vec_avx512_offload;
3252 "Using AVX512 Vector Rx (port %d).",
3253 dev->data->port_id);
3255 ice_recv_pkts_vec_avx512;
3258 } else if (ad->rx_use_avx2) {
3259 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3261 "Using AVX2 OFFLOAD Vector Rx (port %d).",
3262 dev->data->port_id);
3264 ice_recv_pkts_vec_avx2_offload;
3267 "Using AVX2 Vector Rx (port %d).",
3268 dev->data->port_id);
3270 ice_recv_pkts_vec_avx2;
3274 "Using Vector Rx (port %d).",
3275 dev->data->port_id);
3276 dev->rx_pkt_burst = ice_recv_pkts_vec;
3284 if (dev->data->scattered_rx) {
3285 /* Set the non-LRO scattered function */
3287 "Using a Scattered function on port %d.",
3288 dev->data->port_id);
3289 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3290 } else if (ad->rx_bulk_alloc_allowed) {
3292 "Rx Burst Bulk Alloc Preconditions are "
3293 "satisfied. Rx Burst Bulk Alloc function "
3294 "will be used on port %d.",
3295 dev->data->port_id);
3296 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3299 "Rx Burst Bulk Alloc Preconditions are not "
3300 "satisfied, Normal Rx will be used on port %d.",
3301 dev->data->port_id);
3302 dev->rx_pkt_burst = ice_recv_pkts;
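/*
 * Note (editorial summary): the Rx path above is selected in decreasing
 * order of preference: AVX512, AVX2, then SSE vector paths (scattered or
 * regular, offload or basic); when the vector path is not allowed, the
 * scalar scattered, bulk-allocation or plain ice_recv_pkts() path is used.
 */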
3306 static const struct {
3307 eth_rx_burst_t pkt_burst;
3309 } ice_rx_burst_infos[] = {
3310 { ice_recv_scattered_pkts, "Scalar Scattered" },
3311 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3312 { ice_recv_pkts, "Scalar" },
3314 #ifdef CC_AVX512_SUPPORT
3315 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3316 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3317 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3318 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3320 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3321 { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3322 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3323 { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
3324 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3325 { ice_recv_pkts_vec, "Vector SSE" },
3330 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3331 struct rte_eth_burst_mode *mode)
3333 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3337 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3338 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3339 snprintf(mode->info, sizeof(mode->info), "%s",
3340 ice_rx_burst_infos[i].info);
3350 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3352 struct ice_adapter *ad =
3353 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3355 /* Use a simple Tx queue if possible (only fast free is allowed) */
3356 ad->tx_simple_allowed =
3358 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
3359 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3361 if (ad->tx_simple_allowed)
3362 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3366 "Simple Tx can NOT be enabled on Tx queue %u.",
3370 /*********************************************************************
3374 **********************************************************************/
3375 /* Default limits for TSO MSS and frame size */
3376 #define ICE_MIN_TSO_MSS 64
3377 #define ICE_MAX_TSO_MSS 9728
3378 #define ICE_MAX_TSO_FRAME_SIZE 262144
3380 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3387 for (i = 0; i < nb_pkts; i++) {
3389 ol_flags = m->ol_flags;
3391 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
3392 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3393 m->tso_segsz > ICE_MAX_TSO_MSS ||
3394 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3396 * MSS values outside this range are considered malicious
3402 #ifdef RTE_ETHDEV_DEBUG_TX
3403 ret = rte_validate_tx_offload(m);
3409 ret = rte_net_intel_cksum_prepare(m);
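/*
 * Usage sketch (illustrative only, not part of the driver): when
 * ice_prep_pkts() is installed as dev->tx_pkt_prepare, applications using
 * TSO or checksum offloads are expected to run the burst through
 * rte_eth_tx_prepare() before transmitting it:
 *
 *	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *	uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */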
3419 ice_set_tx_function(struct rte_eth_dev *dev)
3421 struct ice_adapter *ad =
3422 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3424 struct ice_tx_queue *txq;
3426 int tx_check_ret = -1;
3428 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3429 ad->tx_use_avx2 = false;
3430 ad->tx_use_avx512 = false;
3431 tx_check_ret = ice_tx_vec_dev_check(dev);
3432 if (tx_check_ret >= 0 &&
3433 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3434 ad->tx_vec_allowed = true;
3436 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3437 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3438 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3439 #ifdef CC_AVX512_SUPPORT
3440 ad->tx_use_avx512 = true;
3443 "AVX512 is not supported in build env");
3445 if (!ad->tx_use_avx512 &&
3446 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3447 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3448 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3449 ad->tx_use_avx2 = true;
3451 if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3452 tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3453 ad->tx_vec_allowed = false;
3455 if (ad->tx_vec_allowed) {
3456 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3457 txq = dev->data->tx_queues[i];
3458 if (txq && ice_txq_vec_setup(txq)) {
3459 ad->tx_vec_allowed = false;
3465 ad->tx_vec_allowed = false;
3469 if (ad->tx_vec_allowed) {
3470 dev->tx_pkt_prepare = NULL;
3471 if (ad->tx_use_avx512) {
3472 #ifdef CC_AVX512_SUPPORT
3473 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3475 "Using AVX512 OFFLOAD Vector Tx (port %d).",
3476 dev->data->port_id);
3478 ice_xmit_pkts_vec_avx512_offload;
3479 dev->tx_pkt_prepare = ice_prep_pkts;
3482 "Using AVX512 Vector Tx (port %d).",
3483 dev->data->port_id);
3484 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3488 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3490 "Using AVX2 OFFLOAD Vector Tx (port %d).",
3491 dev->data->port_id);
3493 ice_xmit_pkts_vec_avx2_offload;
3494 dev->tx_pkt_prepare = ice_prep_pkts;
3496 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3497 ad->tx_use_avx2 ? "avx2 " : "",
3498 dev->data->port_id);
3499 dev->tx_pkt_burst = ad->tx_use_avx2 ?
3500 ice_xmit_pkts_vec_avx2 :
3509 if (ad->tx_simple_allowed) {
3510 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3511 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3512 dev->tx_pkt_prepare = NULL;
3514 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3515 dev->tx_pkt_burst = ice_xmit_pkts;
3516 dev->tx_pkt_prepare = ice_prep_pkts;
3520 static const struct {
3521 eth_tx_burst_t pkt_burst;
3523 } ice_tx_burst_infos[] = {
3524 { ice_xmit_pkts_simple, "Scalar Simple" },
3525 { ice_xmit_pkts, "Scalar" },
3527 #ifdef CC_AVX512_SUPPORT
3528 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3529 { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3531 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3532 { ice_xmit_pkts_vec, "Vector SSE" },
3537 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3538 struct rte_eth_burst_mode *mode)
3540 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3544 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3545 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3546 snprintf(mode->info, sizeof(mode->info), "%s",
3547 ice_tx_burst_infos[i].info);
3556 /* For the meaning of each value, the hardware datasheet gives more details.
3558 * @note: update ice_dev_supported_ptypes_get() if anything changes here.
3560 static inline uint32_t
3561 ice_get_default_pkt_type(uint16_t ptype)
3563 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3564 __rte_cache_aligned = {
3567 [1] = RTE_PTYPE_L2_ETHER,
3568 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3569 /* [3] - [5] reserved */
3570 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3571 /* [7] - [10] reserved */
3572 [11] = RTE_PTYPE_L2_ETHER_ARP,
3573 /* [12] - [21] reserved */
3575 /* Non tunneled IPv4 */
3576 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3578 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3579 RTE_PTYPE_L4_NONFRAG,
3580 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3583 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3585 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3587 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3591 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3592 RTE_PTYPE_TUNNEL_IP |
3593 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3594 RTE_PTYPE_INNER_L4_FRAG,
3595 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3596 RTE_PTYPE_TUNNEL_IP |
3597 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3598 RTE_PTYPE_INNER_L4_NONFRAG,
3599 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3600 RTE_PTYPE_TUNNEL_IP |
3601 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3602 RTE_PTYPE_INNER_L4_UDP,
3604 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3605 RTE_PTYPE_TUNNEL_IP |
3606 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3607 RTE_PTYPE_INNER_L4_TCP,
3608 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3609 RTE_PTYPE_TUNNEL_IP |
3610 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3611 RTE_PTYPE_INNER_L4_SCTP,
3612 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3613 RTE_PTYPE_TUNNEL_IP |
3614 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3615 RTE_PTYPE_INNER_L4_ICMP,
3618 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3619 RTE_PTYPE_TUNNEL_IP |
3620 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3621 RTE_PTYPE_INNER_L4_FRAG,
3622 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3623 RTE_PTYPE_TUNNEL_IP |
3624 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3625 RTE_PTYPE_INNER_L4_NONFRAG,
3626 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3627 RTE_PTYPE_TUNNEL_IP |
3628 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3629 RTE_PTYPE_INNER_L4_UDP,
3631 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3632 RTE_PTYPE_TUNNEL_IP |
3633 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3634 RTE_PTYPE_INNER_L4_TCP,
3635 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3636 RTE_PTYPE_TUNNEL_IP |
3637 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3638 RTE_PTYPE_INNER_L4_SCTP,
3639 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3640 RTE_PTYPE_TUNNEL_IP |
3641 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3642 RTE_PTYPE_INNER_L4_ICMP,
3644 /* IPv4 --> GRE/Teredo/VXLAN */
3645 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3646 RTE_PTYPE_TUNNEL_GRENAT,
3648 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3649 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3650 RTE_PTYPE_TUNNEL_GRENAT |
3651 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3652 RTE_PTYPE_INNER_L4_FRAG,
3653 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3654 RTE_PTYPE_TUNNEL_GRENAT |
3655 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3656 RTE_PTYPE_INNER_L4_NONFRAG,
3657 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3658 RTE_PTYPE_TUNNEL_GRENAT |
3659 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3660 RTE_PTYPE_INNER_L4_UDP,
3662 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3663 RTE_PTYPE_TUNNEL_GRENAT |
3664 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3665 RTE_PTYPE_INNER_L4_TCP,
3666 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3667 RTE_PTYPE_TUNNEL_GRENAT |
3668 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3669 RTE_PTYPE_INNER_L4_SCTP,
3670 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3671 RTE_PTYPE_TUNNEL_GRENAT |
3672 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3673 RTE_PTYPE_INNER_L4_ICMP,
3675 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3676 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3677 RTE_PTYPE_TUNNEL_GRENAT |
3678 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3679 RTE_PTYPE_INNER_L4_FRAG,
3680 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3681 RTE_PTYPE_TUNNEL_GRENAT |
3682 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3683 RTE_PTYPE_INNER_L4_NONFRAG,
3684 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3685 RTE_PTYPE_TUNNEL_GRENAT |
3686 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3687 RTE_PTYPE_INNER_L4_UDP,
3689 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3690 RTE_PTYPE_TUNNEL_GRENAT |
3691 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3692 RTE_PTYPE_INNER_L4_TCP,
3693 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3694 RTE_PTYPE_TUNNEL_GRENAT |
3695 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3696 RTE_PTYPE_INNER_L4_SCTP,
3697 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3698 RTE_PTYPE_TUNNEL_GRENAT |
3699 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3700 RTE_PTYPE_INNER_L4_ICMP,
3702 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3703 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3704 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3706 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3707 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3708 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3709 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3710 RTE_PTYPE_INNER_L4_FRAG,
3711 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3712 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3713 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3714 RTE_PTYPE_INNER_L4_NONFRAG,
3715 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3716 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3717 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3718 RTE_PTYPE_INNER_L4_UDP,
3720 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3721 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3722 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3723 RTE_PTYPE_INNER_L4_TCP,
3724 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3725 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3726 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3727 RTE_PTYPE_INNER_L4_SCTP,
3728 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3729 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3730 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3731 RTE_PTYPE_INNER_L4_ICMP,
3733 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3734 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3735 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3736 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3737 RTE_PTYPE_INNER_L4_FRAG,
3738 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3739 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3740 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3741 RTE_PTYPE_INNER_L4_NONFRAG,
3742 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3743 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3744 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3745 RTE_PTYPE_INNER_L4_UDP,
3747 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3748 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3749 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3750 RTE_PTYPE_INNER_L4_TCP,
3751 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3752 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3753 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3754 RTE_PTYPE_INNER_L4_SCTP,
3755 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3756 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3757 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3758 RTE_PTYPE_INNER_L4_ICMP,
3759 /* [73] - [87] reserved */
3761 /* Non tunneled IPv6 */
3762 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3764 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3765 RTE_PTYPE_L4_NONFRAG,
3766 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3769 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3771 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3773 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3777 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3778 RTE_PTYPE_TUNNEL_IP |
3779 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3780 RTE_PTYPE_INNER_L4_FRAG,
3781 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3782 RTE_PTYPE_TUNNEL_IP |
3783 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3784 RTE_PTYPE_INNER_L4_NONFRAG,
3785 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3786 RTE_PTYPE_TUNNEL_IP |
3787 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3788 RTE_PTYPE_INNER_L4_UDP,
3790 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3791 RTE_PTYPE_TUNNEL_IP |
3792 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3793 RTE_PTYPE_INNER_L4_TCP,
3794 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3795 RTE_PTYPE_TUNNEL_IP |
3796 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3797 RTE_PTYPE_INNER_L4_SCTP,
3798 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3799 RTE_PTYPE_TUNNEL_IP |
3800 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3801 RTE_PTYPE_INNER_L4_ICMP,
3804 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3805 RTE_PTYPE_TUNNEL_IP |
3806 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3807 RTE_PTYPE_INNER_L4_FRAG,
3808 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3809 RTE_PTYPE_TUNNEL_IP |
3810 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3811 RTE_PTYPE_INNER_L4_NONFRAG,
3812 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3813 RTE_PTYPE_TUNNEL_IP |
3814 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3815 RTE_PTYPE_INNER_L4_UDP,
3816 /* [105] reserved */
3817 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3818 RTE_PTYPE_TUNNEL_IP |
3819 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3820 RTE_PTYPE_INNER_L4_TCP,
3821 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3822 RTE_PTYPE_TUNNEL_IP |
3823 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3824 RTE_PTYPE_INNER_L4_SCTP,
3825 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3826 RTE_PTYPE_TUNNEL_IP |
3827 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3828 RTE_PTYPE_INNER_L4_ICMP,
3830 /* IPv6 --> GRE/Teredo/VXLAN */
3831 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3832 RTE_PTYPE_TUNNEL_GRENAT,
3834 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3835 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3836 RTE_PTYPE_TUNNEL_GRENAT |
3837 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3838 RTE_PTYPE_INNER_L4_FRAG,
3839 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3840 RTE_PTYPE_TUNNEL_GRENAT |
3841 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3842 RTE_PTYPE_INNER_L4_NONFRAG,
3843 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3844 RTE_PTYPE_TUNNEL_GRENAT |
3845 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3846 RTE_PTYPE_INNER_L4_UDP,
3847 /* [113] reserved */
3848 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3849 RTE_PTYPE_TUNNEL_GRENAT |
3850 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3851 RTE_PTYPE_INNER_L4_TCP,
3852 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3853 RTE_PTYPE_TUNNEL_GRENAT |
3854 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3855 RTE_PTYPE_INNER_L4_SCTP,
3856 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3857 RTE_PTYPE_TUNNEL_GRENAT |
3858 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3859 RTE_PTYPE_INNER_L4_ICMP,
3861 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3862 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3863 RTE_PTYPE_TUNNEL_GRENAT |
3864 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3865 RTE_PTYPE_INNER_L4_FRAG,
3866 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3867 RTE_PTYPE_TUNNEL_GRENAT |
3868 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3869 RTE_PTYPE_INNER_L4_NONFRAG,
3870 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3871 RTE_PTYPE_TUNNEL_GRENAT |
3872 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3873 RTE_PTYPE_INNER_L4_UDP,
3874 /* [120] reserved */
3875 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3876 RTE_PTYPE_TUNNEL_GRENAT |
3877 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3878 RTE_PTYPE_INNER_L4_TCP,
3879 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3880 RTE_PTYPE_TUNNEL_GRENAT |
3881 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3882 RTE_PTYPE_INNER_L4_SCTP,
3883 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3884 RTE_PTYPE_TUNNEL_GRENAT |
3885 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3886 RTE_PTYPE_INNER_L4_ICMP,
3888 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3889 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3890 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3892 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3893 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3894 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3895 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3896 RTE_PTYPE_INNER_L4_FRAG,
3897 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3898 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3899 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3900 RTE_PTYPE_INNER_L4_NONFRAG,
3901 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3902 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3903 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3904 RTE_PTYPE_INNER_L4_UDP,
3905 /* [128] reserved */
3906 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3907 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3908 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3909 RTE_PTYPE_INNER_L4_TCP,
3910 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3911 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3912 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3913 RTE_PTYPE_INNER_L4_SCTP,
3914 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3915 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3916 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3917 RTE_PTYPE_INNER_L4_ICMP,
3919 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3920 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3921 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3922 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3923 RTE_PTYPE_INNER_L4_FRAG,
3924 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3925 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3926 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3927 RTE_PTYPE_INNER_L4_NONFRAG,
3928 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3929 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3930 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3931 RTE_PTYPE_INNER_L4_UDP,
3932 /* [135] reserved */
3933 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3934 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3935 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3936 RTE_PTYPE_INNER_L4_TCP,
3937 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3938 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3939 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3940 RTE_PTYPE_INNER_L4_SCTP,
3941 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3942 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3943 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3944 RTE_PTYPE_INNER_L4_ICMP,
3945 /* [139] - [299] reserved */
3948 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3949 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3951 /* PPPoE --> IPv4 */
3952 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3953 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3955 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3956 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3957 RTE_PTYPE_L4_NONFRAG,
3958 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3959 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3961 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3962 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3964 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3965 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3967 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3968 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3971 /* PPPoE --> IPv6 */
3972 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3973 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3975 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3976 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3977 RTE_PTYPE_L4_NONFRAG,
3978 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3979 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3981 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3982 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3984 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3985 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3987 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3988 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3990 /* [314] - [324] reserved */
3992 /* IPv4/IPv6 --> GTPC/GTPU */
3993 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3994 RTE_PTYPE_TUNNEL_GTPC,
3995 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3996 RTE_PTYPE_TUNNEL_GTPC,
3997 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3998 RTE_PTYPE_TUNNEL_GTPC,
3999 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4000 RTE_PTYPE_TUNNEL_GTPC,
4001 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4002 RTE_PTYPE_TUNNEL_GTPU,
4003 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4004 RTE_PTYPE_TUNNEL_GTPU,
4006 /* IPv4 --> GTPU --> IPv4 */
4007 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4008 RTE_PTYPE_TUNNEL_GTPU |
4009 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4010 RTE_PTYPE_INNER_L4_FRAG,
4011 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4012 RTE_PTYPE_TUNNEL_GTPU |
4013 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4014 RTE_PTYPE_INNER_L4_NONFRAG,
4015 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4016 RTE_PTYPE_TUNNEL_GTPU |
4017 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4018 RTE_PTYPE_INNER_L4_UDP,
4019 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4020 RTE_PTYPE_TUNNEL_GTPU |
4021 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4022 RTE_PTYPE_INNER_L4_TCP,
4023 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4024 RTE_PTYPE_TUNNEL_GTPU |
4025 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4026 RTE_PTYPE_INNER_L4_ICMP,
4028 /* IPv6 --> GTPU --> IPv4 */
4029 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4030 RTE_PTYPE_TUNNEL_GTPU |
4031 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4032 RTE_PTYPE_INNER_L4_FRAG,
4033 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4034 RTE_PTYPE_TUNNEL_GTPU |
4035 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4036 RTE_PTYPE_INNER_L4_NONFRAG,
4037 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4038 RTE_PTYPE_TUNNEL_GTPU |
4039 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4040 RTE_PTYPE_INNER_L4_UDP,
4041 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4042 RTE_PTYPE_TUNNEL_GTPU |
4043 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4044 RTE_PTYPE_INNER_L4_TCP,
4045 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4046 RTE_PTYPE_TUNNEL_GTPU |
4047 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4048 RTE_PTYPE_INNER_L4_ICMP,
4050 /* IPv4 --> GTPU --> IPv6 */
4051 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4052 RTE_PTYPE_TUNNEL_GTPU |
4053 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4054 RTE_PTYPE_INNER_L4_FRAG,
4055 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4056 RTE_PTYPE_TUNNEL_GTPU |
4057 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4058 RTE_PTYPE_INNER_L4_NONFRAG,
4059 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4060 RTE_PTYPE_TUNNEL_GTPU |
4061 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4062 RTE_PTYPE_INNER_L4_UDP,
4063 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4064 RTE_PTYPE_TUNNEL_GTPU |
4065 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4066 RTE_PTYPE_INNER_L4_TCP,
4067 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4068 RTE_PTYPE_TUNNEL_GTPU |
4069 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4070 RTE_PTYPE_INNER_L4_ICMP,
4072 /* IPv6 --> GTPU --> IPv6 */
4073 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4074 RTE_PTYPE_TUNNEL_GTPU |
4075 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4076 RTE_PTYPE_INNER_L4_FRAG,
4077 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4078 RTE_PTYPE_TUNNEL_GTPU |
4079 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4080 RTE_PTYPE_INNER_L4_NONFRAG,
4081 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4082 RTE_PTYPE_TUNNEL_GTPU |
4083 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4084 RTE_PTYPE_INNER_L4_UDP,
4085 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4086 RTE_PTYPE_TUNNEL_GTPU |
4087 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4088 RTE_PTYPE_INNER_L4_TCP,
4089 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4090 RTE_PTYPE_TUNNEL_GTPU |
4091 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4092 RTE_PTYPE_INNER_L4_ICMP,
4094 /* IPv4 --> UDP ECPRI */
4095 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4097 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4099 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4101 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4103 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4105 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4107 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4109 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4111 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4113 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4116 /* IPV6 --> UDP ECPRI */
4117 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4119 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4121 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4123 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4125 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4127 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4129 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4131 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4133 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4135 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4137 /* All others reserved */
4140 return type_table[ptype];
4144 ice_set_default_ptype_table(struct rte_eth_dev *dev)
4146 struct ice_adapter *ad =
4147 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4150 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4151 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
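/*
 * Illustrative note (editorial): the table is indexed with the 10-bit
 * hardware ptype taken from the Rx descriptor, e.g. ptype 23 resolves to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG
 * for a non-fragmented IPv4 packet whose L4 is not further classified.
 */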
4154 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
4155 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
4156 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4157 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4158 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4160 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
4161 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
4162 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4163 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
4164 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
4165 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4168 * Check the programming status descriptor in the Rx queue.
4169 * Done after a Flow Director rule has been programmed on the Tx queue.
4173 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4175 volatile union ice_32byte_rx_desc *rxdp;
4182 rxdp = (volatile union ice_32byte_rx_desc *)
4183 (&rxq->rx_ring[rxq->rx_tail]);
4184 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4185 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4186 >> ICE_RXD_QW1_STATUS_S;
4188 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4190 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4191 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4192 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4193 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4195 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4196 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4197 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4198 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4202 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4203 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4205 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4209 rxdp->wb.qword1.status_error_len = 0;
4211 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4213 if (rxq->rx_tail == 0)
4214 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4216 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4222 #define ICE_FDIR_MAX_WAIT_US 10000
4225 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4227 struct ice_tx_queue *txq = pf->fdir.txq;
4228 struct ice_rx_queue *rxq = pf->fdir.rxq;
4229 volatile struct ice_fltr_desc *fdirdp;
4230 volatile struct ice_tx_desc *txdp;
4234 fdirdp = (volatile struct ice_fltr_desc *)
4235 (&txq->tx_ring[txq->tx_tail]);
4236 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4237 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4239 txdp = &txq->tx_ring[txq->tx_tail + 1];
4240 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4241 td_cmd = ICE_TX_DESC_CMD_EOP |
4242 ICE_TX_DESC_CMD_RS |
4243 ICE_TX_DESC_CMD_DUMMY;
4245 txdp->cmd_type_offset_bsz =
4246 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4249 if (txq->tx_tail >= txq->nb_tx_desc)
4251 /* Update the tx tail register */
4252 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4253 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4254 if ((txdp->cmd_type_offset_bsz &
4255 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4256 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4260 if (i >= ICE_FDIR_MAX_WAIT_US) {
4262 "Failed to program FDIR filter: time out to get DD on tx queue.");
4266 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4269 ret = ice_check_fdir_programming_status(rxq);
4277 "Failed to program FDIR filter: programming status reported.");