/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
5 #include <ethdev_driver.h>
9 #include "rte_pmd_ice.h"
11 #include "ice_rxtx_vec_common.h"
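/* Set of mbuf Tx offload flags for which the data path must program
 * checksum or TSO fields in the Tx descriptors (grouping inferred from
 * the flags combined below).
 */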
13 #define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
14 RTE_MBUF_F_TX_L4_MASK | \
15 RTE_MBUF_F_TX_TCP_SEG | \
16 RTE_MBUF_F_TX_OUTER_IP_CKSUM)
18 /* Offset of mbuf dynamic field for protocol extraction data */
19 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
21 /* Mask of mbuf dynamic flags for protocol extraction type */
22 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
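/* Comparison callback used with rte_power_monitor(): assuming the usual
 * callback convention, returning -1 aborts the wait because the watched
 * DD bit is already set, while 0 lets the core keep monitoring.
 */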
30 ice_monitor_callback(const uint64_t value,
31 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
33 const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
/*
 * we expect the DD bit to be set to 1 if this descriptor was already
 * written to.
 */
38 return (value & m) == m ? -1 : 0;
42 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
44 volatile union ice_rx_flex_desc *rxdp;
45 struct ice_rx_queue *rxq = rx_queue;
49 rxdp = &rxq->rx_ring[desc];
50 /* watch for changes in status bit */
51 pmc->addr = &rxdp->wb.status_error0;
53 /* comparison callback */
54 pmc->fn = ice_monitor_callback;
56 /* register is 16-bit */
57 pmc->size = sizeof(uint16_t);
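/* Map a protocol extraction type (from the proto_xtr devarg) to the
 * flexible descriptor RXDID used for the queue; unknown types fall back
 * to the default ICE_RXDID_COMMS_OVS format.
 */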
64 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
66 static uint8_t rxdid_map[] = {
67 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
68 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
69 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
70 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
71 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
72 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
73 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
76 return xtr_type < RTE_DIM(rxdid_map) ?
77 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
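/* The rxd_to_pkt_fields_by_comms_*() handlers below copy per-packet
 * metadata (RSS hash, FDIR ID, protocol extraction data) from the
 * flexible Rx descriptor write-back into the mbuf; the handler used by a
 * queue is chosen from its RXDID in ice_select_rxd_to_pkt_fields_handler().
 */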
81 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
83 volatile union ice_rx_flex_desc *rxdp)
85 volatile struct ice_32b_rx_flex_desc_comms *desc =
86 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
87 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
89 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
90 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
91 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
94 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
95 if (desc->flow_id != 0xFFFFFFFF) {
96 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
97 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
103 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
105 volatile union ice_rx_flex_desc *rxdp)
107 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
108 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
109 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
113 if (desc->flow_id != 0xFFFFFFFF) {
114 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
115 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
118 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
119 stat_err = rte_le_to_cpu_16(desc->status_error0);
120 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
121 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
122 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
128 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
130 volatile union ice_rx_flex_desc *rxdp)
132 volatile struct ice_32b_rx_flex_desc_comms *desc =
133 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
136 stat_err = rte_le_to_cpu_16(desc->status_error0);
137 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
138 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
139 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
142 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
143 if (desc->flow_id != 0xFFFFFFFF) {
144 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
145 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
148 if (rxq->xtr_ol_flag) {
149 uint32_t metadata = 0;
151 stat_err = rte_le_to_cpu_16(desc->status_error1);
153 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
154 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
156 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
158 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
161 mb->ol_flags |= rxq->xtr_ol_flag;
163 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
170 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
172 volatile union ice_rx_flex_desc *rxdp)
174 volatile struct ice_32b_rx_flex_desc_comms *desc =
175 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
178 stat_err = rte_le_to_cpu_16(desc->status_error0);
179 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
180 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
181 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
184 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
185 if (desc->flow_id != 0xFFFFFFFF) {
186 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
187 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
190 if (rxq->xtr_ol_flag) {
191 uint32_t metadata = 0;
193 if (desc->flex_ts.flex.aux0 != 0xFFFF)
194 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
195 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
196 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
199 mb->ol_flags |= rxq->xtr_ol_flag;
201 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
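/* Pick the metadata handler and the protocol extraction ol_flag for this
 * queue based on the RXDID; the flag is cleared again when the metadata
 * dynfield was never registered.
 */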
208 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
211 case ICE_RXDID_COMMS_AUX_VLAN:
212 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
213 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
216 case ICE_RXDID_COMMS_AUX_IPV4:
217 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
218 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
221 case ICE_RXDID_COMMS_AUX_IPV6:
222 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
223 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
226 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
227 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
228 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
231 case ICE_RXDID_COMMS_AUX_TCP:
232 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
233 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
236 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
237 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
238 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
241 case ICE_RXDID_COMMS_GENERIC:
242 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
245 case ICE_RXDID_COMMS_OVS:
246 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
250 /* update this according to the RXDID for PROTO_XTR_NONE */
251 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
255 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
256 rxq->xtr_ol_flag = 0;
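/* Program the Rx queue context in hardware: buffer and packet length
 * limits, the flexible descriptor RXDID via QRXFLXP_CNTXT, the RLAN
 * queue context, and finally the tail register.
 */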
259 static enum ice_status
260 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
262 struct ice_vsi *vsi = rxq->vsi;
263 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
264 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
265 struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
266 struct ice_rlan_ctx rx_ctx;
269 struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
270 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
/* Set buffer size as the header split is disabled. */
274 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
275 RTE_PKTMBUF_HEADROOM);
277 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
278 rxq->max_pkt_len = RTE_MIN((uint32_t)
279 ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
280 dev_data->dev_conf.rxmode.max_rx_pkt_len);
282 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
283 if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
284 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
"be larger than %u and smaller than %u, "
"as jumbo frame is enabled",
288 (uint32_t)ICE_ETH_MAX_LEN,
289 (uint32_t)ICE_FRAME_SIZE_MAX);
293 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
294 rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
295 PMD_DRV_LOG(ERR, "maximum packet length must be "
296 "larger than %u and smaller than %u, "
297 "as jumbo frame is disabled",
298 (uint32_t)RTE_ETHER_MIN_LEN,
299 (uint32_t)ICE_ETH_MAX_LEN);
304 memset(&rx_ctx, 0, sizeof(rx_ctx));
306 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
307 rx_ctx.qlen = rxq->nb_rx_desc;
308 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
309 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
310 rx_ctx.dtype = 0; /* No Header Split mode */
311 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
312 rx_ctx.dsize = 1; /* 32B descriptors */
314 rx_ctx.rxmax = rxq->max_pkt_len;
315 /* TPH: Transaction Layer Packet (TLP) processing hints */
316 rx_ctx.tphrdesc_ena = 1;
317 rx_ctx.tphwdesc_ena = 1;
318 rx_ctx.tphdata_ena = 1;
319 rx_ctx.tphhead_ena = 1;
/* Low Receive Queue Threshold, defined in units of 64 descriptors.
 * When the number of free descriptors goes below the lrxqthresh,
 * an immediate interrupt is triggered.
 */
324 rx_ctx.lrxqthresh = 2;
/* default: use 32-byte descriptors, extract VLAN tag to L2TAG2 (1st) */
328 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
330 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
332 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
333 rxq->port_id, rxq->queue_id, rxdid);
335 if (!(pf->supported_rxdid & BIT(rxdid))) {
PMD_DRV_LOG(ERR, "currently the DDP package doesn't support RXDID (%u)",
341 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
/* Enable Flexible Descriptors in the queue context which
 * allows this driver to select a specific receive descriptor format
 */
346 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
347 QRXFLXP_CNTXT_RXDID_IDX_M;
/* increasing context priority to pick up profile ID;
 * default is 0x01; setting to 0x03 to ensure profile
 * is programmed if the previous context is of the same priority
 */
353 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
354 QRXFLXP_CNTXT_RXDID_PRIO_M;
356 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
358 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
360 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
364 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
366 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
371 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
372 RTE_PKTMBUF_HEADROOM);
374 /* Check if scattered RX needs to be used. */
375 if (rxq->max_pkt_len > buf_size)
376 dev_data->scattered_rx = 1;
378 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
380 /* Init the Rx tail register*/
381 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
386 /* Allocate mbufs for all descriptors in rx queue */
388 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
390 struct ice_rx_entry *rxe = rxq->sw_ring;
394 for (i = 0; i < rxq->nb_rx_desc; i++) {
395 volatile union ice_rx_flex_desc *rxd;
396 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
398 if (unlikely(!mbuf)) {
399 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
403 rte_mbuf_refcnt_set(mbuf, 1);
405 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
407 mbuf->port = rxq->port_id;
410 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
412 rxd = &rxq->rx_ring[i];
413 rxd->read.pkt_addr = dma_addr;
414 rxd->read.hdr_addr = 0;
415 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
425 /* Free all mbufs for descriptors in rx queue */
427 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
431 if (!rxq || !rxq->sw_ring) {
432 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
436 for (i = 0; i < rxq->nb_rx_desc; i++) {
437 if (rxq->sw_ring[i].mbuf) {
438 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
439 rxq->sw_ring[i].mbuf = NULL;
442 if (rxq->rx_nb_avail == 0)
444 for (i = 0; i < rxq->rx_nb_avail; i++)
445 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
447 rxq->rx_nb_avail = 0;
450 /* turn on or off rx queue
451 * @q_idx: queue index in pf scope
452 * @on: turn on or off the queue
455 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
460 /* QRX_CTRL = QRX_ENA */
461 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
464 if (reg & QRX_CTRL_QENA_STAT_M)
465 return 0; /* Already on, skip */
466 reg |= QRX_CTRL_QENA_REQ_M;
468 if (!(reg & QRX_CTRL_QENA_STAT_M))
469 return 0; /* Already off, skip */
470 reg &= ~QRX_CTRL_QENA_REQ_M;
473 /* Write the register */
474 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
/* Check the result. It is said that QENA_STAT
 * follows QENA_REQ by not more than 10 us.
 * TODO: need to change the wait counter later
 */
479 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
480 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
481 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
483 if ((reg & QRX_CTRL_QENA_REQ_M) &&
484 (reg & QRX_CTRL_QENA_STAT_M))
487 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
488 !(reg & QRX_CTRL_QENA_STAT_M))
493 /* Check if it is timeout */
494 if (j >= ICE_CHK_Q_ENA_COUNT) {
495 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
496 (on ? "enable" : "disable"), q_idx);
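/* Bulk-allocation Rx is only allowed when rx_free_thresh is at least
 * ICE_RX_MAX_BURST, smaller than the ring size, and divides the ring
 * size evenly; the checks below mirror these conditions.
 */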
504 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
508 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
509 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
510 "rxq->rx_free_thresh=%d, "
511 "ICE_RX_MAX_BURST=%d",
512 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
514 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
515 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
516 "rxq->rx_free_thresh=%d, "
517 "rxq->nb_rx_desc=%d",
518 rxq->rx_free_thresh, rxq->nb_rx_desc);
520 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
521 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
522 "rxq->nb_rx_desc=%d, "
523 "rxq->rx_free_thresh=%d",
524 rxq->nb_rx_desc, rxq->rx_free_thresh);
531 /* reset fields in ice_rx_queue back to default */
533 ice_reset_rx_queue(struct ice_rx_queue *rxq)
539 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
543 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
545 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
546 ((volatile char *)rxq->rx_ring)[i] = 0;
548 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
549 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
550 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
552 rxq->rx_nb_avail = 0;
553 rxq->rx_next_avail = 0;
554 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
558 rxq->pkt_first_seg = NULL;
559 rxq->pkt_last_seg = NULL;
561 rxq->rxrearm_start = 0;
566 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
568 struct ice_rx_queue *rxq;
570 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
572 PMD_INIT_FUNC_TRACE();
574 if (rx_queue_id >= dev->data->nb_rx_queues) {
575 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
576 rx_queue_id, dev->data->nb_rx_queues);
580 rxq = dev->data->rx_queues[rx_queue_id];
581 if (!rxq || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
587 err = ice_program_hw_rx_queue(rxq);
PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
594 err = ice_alloc_rx_queue_mbufs(rxq);
596 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
600 /* Init the RX tail register. */
601 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
603 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
605 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
608 rxq->rx_rel_mbufs(rxq);
609 ice_reset_rx_queue(rxq);
613 dev->data->rx_queue_state[rx_queue_id] =
614 RTE_ETH_QUEUE_STATE_STARTED;
620 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
622 struct ice_rx_queue *rxq;
624 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
626 if (rx_queue_id < dev->data->nb_rx_queues) {
627 rxq = dev->data->rx_queues[rx_queue_id];
629 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
631 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
635 rxq->rx_rel_mbufs(rxq);
636 ice_reset_rx_queue(rxq);
637 dev->data->rx_queue_state[rx_queue_id] =
638 RTE_ETH_QUEUE_STATE_STOPPED;
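/* Start a Tx queue: build the TLAN queue context, hand it to firmware
 * through ice_ena_vsi_txq() (TC 0 assumed, as noted below), and keep the
 * returned scheduler node TEID for later teardown.
 */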
645 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
647 struct ice_tx_queue *txq;
651 struct ice_aqc_add_tx_qgrp *txq_elem;
652 struct ice_tlan_ctx tx_ctx;
655 PMD_INIT_FUNC_TRACE();
657 if (tx_queue_id >= dev->data->nb_tx_queues) {
658 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
659 tx_queue_id, dev->data->nb_tx_queues);
663 txq = dev->data->tx_queues[tx_queue_id];
664 if (!txq || !txq->q_set) {
PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
670 buf_len = ice_struct_size(txq_elem, txqs, 1);
671 txq_elem = ice_malloc(hw, buf_len);
676 hw = ICE_VSI_TO_HW(vsi);
678 memset(&tx_ctx, 0, sizeof(tx_ctx));
679 txq_elem->num_txqs = 1;
680 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
682 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
683 tx_ctx.qlen = txq->nb_tx_desc;
684 tx_ctx.pf_num = hw->pf_id;
685 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
686 tx_ctx.src_vsi = vsi->vsi_id;
687 tx_ctx.port_num = hw->port_info->lport;
688 tx_ctx.tso_ena = 1; /* tso enable */
689 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
690 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
692 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
695 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
697 /* Init the Tx tail register*/
698 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
700 /* Fix me, we assume TC always 0 here */
701 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
702 txq_elem, buf_len, NULL);
704 PMD_DRV_LOG(ERR, "Failed to add lan txq");
708 /* store the schedule node id */
709 txq->q_teid = txq_elem->txqs[0].q_teid;
711 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
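/* The FDIR programming queue is configured with the legacy 32-byte
 * descriptor format (ICE_RXDID_LEGACY_1) and a fixed 1024-byte buffer;
 * it is assumed to carry only flow director programming status packets.
 */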
717 static enum ice_status
718 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
720 struct ice_vsi *vsi = rxq->vsi;
721 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
722 uint32_t rxdid = ICE_RXDID_LEGACY_1;
723 struct ice_rlan_ctx rx_ctx;
728 rxq->rx_buf_len = 1024;
730 memset(&rx_ctx, 0, sizeof(rx_ctx));
732 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
733 rx_ctx.qlen = rxq->nb_rx_desc;
734 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
735 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
736 rx_ctx.dtype = 0; /* No Header Split mode */
737 rx_ctx.dsize = 1; /* 32B descriptors */
738 rx_ctx.rxmax = ICE_ETH_MAX_LEN;
739 /* TPH: Transaction Layer Packet (TLP) processing hints */
740 rx_ctx.tphrdesc_ena = 1;
741 rx_ctx.tphwdesc_ena = 1;
742 rx_ctx.tphdata_ena = 1;
743 rx_ctx.tphhead_ena = 1;
/* Low Receive Queue Threshold, defined in units of 64 descriptors.
 * When the number of free descriptors goes below the lrxqthresh,
 * an immediate interrupt is triggered.
 */
748 rx_ctx.lrxqthresh = 2;
/* default: use 32-byte descriptors, extract VLAN tag to L2TAG2 (1st) */
752 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
/* Enable Flexible Descriptors in the queue context which
 * allows this driver to select a specific receive descriptor format
 */
757 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
758 QRXFLXP_CNTXT_RXDID_IDX_M;
/* increasing context priority to pick up profile ID;
 * default is 0x01; setting to 0x03 to ensure profile
 * is programmed if the previous context is of the same priority
 */
764 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
765 QRXFLXP_CNTXT_RXDID_PRIO_M;
767 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
769 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
771 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
775 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
777 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
782 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
784 /* Init the Rx tail register*/
785 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
791 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
793 struct ice_rx_queue *rxq;
795 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
796 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
798 PMD_INIT_FUNC_TRACE();
801 if (!rxq || !rxq->q_set) {
PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available or not set up",
807 err = ice_fdir_program_hw_rx_queue(rxq);
PMD_DRV_LOG(ERR, "Failed to program FDIR RX queue %u",
814 /* Init the RX tail register. */
815 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
817 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
819 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
822 ice_reset_rx_queue(rxq);
830 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
832 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
833 struct ice_tx_queue *txq;
837 struct ice_aqc_add_tx_qgrp *txq_elem;
838 struct ice_tlan_ctx tx_ctx;
841 PMD_INIT_FUNC_TRACE();
844 if (!txq || !txq->q_set) {
PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up",
850 buf_len = ice_struct_size(txq_elem, txqs, 1);
851 txq_elem = ice_malloc(hw, buf_len);
856 hw = ICE_VSI_TO_HW(vsi);
858 memset(&tx_ctx, 0, sizeof(tx_ctx));
859 txq_elem->num_txqs = 1;
860 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
862 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
863 tx_ctx.qlen = txq->nb_tx_desc;
864 tx_ctx.pf_num = hw->pf_id;
865 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
866 tx_ctx.src_vsi = vsi->vsi_id;
867 tx_ctx.port_num = hw->port_info->lport;
868 tx_ctx.tso_ena = 1; /* tso enable */
869 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
870 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
872 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
875 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
877 /* Init the Tx tail register*/
878 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
880 /* Fix me, we assume TC always 0 here */
881 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
882 txq_elem, buf_len, NULL);
884 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
888 /* store the schedule node id */
889 txq->q_teid = txq_elem->txqs[0].q_teid;
895 /* Free all mbufs for descriptors in tx queue */
897 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
901 if (!txq || !txq->sw_ring) {
902 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
906 for (i = 0; i < txq->nb_tx_desc; i++) {
907 if (txq->sw_ring[i].mbuf) {
908 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
909 txq->sw_ring[i].mbuf = NULL;
915 ice_reset_tx_queue(struct ice_tx_queue *txq)
917 struct ice_tx_entry *txe;
918 uint16_t i, prev, size;
921 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
926 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
927 for (i = 0; i < size; i++)
928 ((volatile char *)txq->tx_ring)[i] = 0;
930 prev = (uint16_t)(txq->nb_tx_desc - 1);
931 for (i = 0; i < txq->nb_tx_desc; i++) {
932 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
934 txd->cmd_type_offset_bsz =
935 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
938 txe[prev].next_id = i;
942 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
943 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
948 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
949 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
953 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
955 struct ice_tx_queue *txq;
956 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
957 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
958 struct ice_vsi *vsi = pf->main_vsi;
959 enum ice_status status;
962 uint16_t q_handle = tx_queue_id;
964 if (tx_queue_id >= dev->data->nb_tx_queues) {
965 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
966 tx_queue_id, dev->data->nb_tx_queues);
970 txq = dev->data->tx_queues[tx_queue_id];
972 PMD_DRV_LOG(ERR, "TX queue %u is not available",
977 q_ids[0] = txq->reg_idx;
978 q_teids[0] = txq->q_teid;
980 /* Fix me, we assume TC always 0 here */
981 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
982 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
983 if (status != ICE_SUCCESS) {
984 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
988 txq->tx_rel_mbufs(txq);
989 ice_reset_tx_queue(txq);
990 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
996 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
998 struct ice_rx_queue *rxq;
1000 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1001 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1005 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1007 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1011 rxq->rx_rel_mbufs(rxq);
1017 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1019 struct ice_tx_queue *txq;
1020 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1021 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1022 struct ice_vsi *vsi = pf->main_vsi;
1023 enum ice_status status;
1025 uint32_t q_teids[1];
1026 uint16_t q_handle = tx_queue_id;
1030 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1036 q_ids[0] = txq->reg_idx;
1037 q_teids[0] = txq->q_teid;
1039 /* Fix me, we assume TC always 0 here */
1040 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1041 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1042 if (status != ICE_SUCCESS) {
1043 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1047 txq->tx_rel_mbufs(txq);
1053 ice_rx_queue_setup(struct rte_eth_dev *dev,
1056 unsigned int socket_id,
1057 const struct rte_eth_rxconf *rx_conf,
1058 struct rte_mempool *mp)
1060 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1061 struct ice_adapter *ad =
1062 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1063 struct ice_vsi *vsi = pf->main_vsi;
1064 struct ice_rx_queue *rxq;
1065 const struct rte_memzone *rz;
1068 int use_def_burst_func = 1;
1071 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1072 nb_desc > ICE_MAX_RING_DESC ||
1073 nb_desc < ICE_MIN_RING_DESC) {
1074 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1075 "invalid", nb_desc);
1079 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1081 /* Free memory if needed */
1082 if (dev->data->rx_queues[queue_idx]) {
1083 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1084 dev->data->rx_queues[queue_idx] = NULL;
1087 /* Allocate the rx queue data structure */
1088 rxq = rte_zmalloc_socket(NULL,
1089 sizeof(struct ice_rx_queue),
1090 RTE_CACHE_LINE_SIZE,
1093 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1094 "rx queue data structure");
1098 rxq->nb_rx_desc = nb_desc;
1099 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1100 rxq->queue_id = queue_idx;
1101 rxq->offloads = offloads;
1103 rxq->reg_idx = vsi->base_queue + queue_idx;
1104 rxq->port_id = dev->data->port_id;
1105 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1106 rxq->crc_len = RTE_ETHER_CRC_LEN;
1110 rxq->drop_en = rx_conf->rx_drop_en;
1112 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1113 rxq->proto_xtr = pf->proto_xtr != NULL ?
1114 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
/* Allocate the maximum number of RX ring hardware descriptors. */
1117 len = ICE_MAX_RING_DESC;
/* Allocate a little more memory because the vectorized/bulk_alloc Rx
 * functions don't check boundaries each time.
 */
1123 len += ICE_RX_MAX_BURST;
1125 /* Allocate the maximum number of RX ring hardware descriptor. */
1126 ring_size = sizeof(union ice_rx_flex_desc) * len;
1127 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1128 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1129 ring_size, ICE_RING_BASE_ALIGN,
1132 ice_rx_queue_release(rxq);
1133 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1137 /* Zero all the descriptors in the ring. */
1138 memset(rz->addr, 0, ring_size);
1140 rxq->rx_ring_dma = rz->iova;
1141 rxq->rx_ring = rz->addr;
1143 /* always reserve more for bulk alloc */
1144 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1146 /* Allocate the software ring. */
1147 rxq->sw_ring = rte_zmalloc_socket(NULL,
1148 sizeof(struct ice_rx_entry) * len,
1149 RTE_CACHE_LINE_SIZE,
1151 if (!rxq->sw_ring) {
1152 ice_rx_queue_release(rxq);
1153 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1157 ice_reset_rx_queue(rxq);
1159 dev->data->rx_queues[queue_idx] = rxq;
1160 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1162 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1164 if (!use_def_burst_func) {
1165 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1166 "satisfied. Rx Burst Bulk Alloc function will be "
1167 "used on port=%d, queue=%d.",
1168 rxq->port_id, rxq->queue_id);
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"not satisfied, or Scattered Rx is requested, "
"on port=%d, queue=%d.",
1173 rxq->port_id, rxq->queue_id);
1174 ad->rx_bulk_alloc_allowed = false;
1181 ice_rx_queue_release(void *rxq)
1183 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1186 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1191 rte_free(q->sw_ring);
1196 ice_tx_queue_setup(struct rte_eth_dev *dev,
1199 unsigned int socket_id,
1200 const struct rte_eth_txconf *tx_conf)
1202 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1203 struct ice_vsi *vsi = pf->main_vsi;
1204 struct ice_tx_queue *txq;
1205 const struct rte_memzone *tz;
1207 uint16_t tx_rs_thresh, tx_free_thresh;
1210 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1212 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1213 nb_desc > ICE_MAX_RING_DESC ||
1214 nb_desc < ICE_MIN_RING_DESC) {
1215 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1216 "invalid", nb_desc);
1221 * The following two parameters control the setting of the RS bit on
1222 * transmit descriptors. TX descriptors will have their RS bit set
1223 * after txq->tx_rs_thresh descriptors have been used. The TX
1224 * descriptor ring will be cleaned after txq->tx_free_thresh
1225 * descriptors are used or if the number of descriptors required to
1226 * transmit a packet is greater than the number of free TX descriptors.
1228 * The following constraints must be satisfied:
1229 * - tx_rs_thresh must be greater than 0.
1230 * - tx_rs_thresh must be less than the size of the ring minus 2.
1231 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1232 * - tx_rs_thresh must be a divisor of the ring size.
1233 * - tx_free_thresh must be greater than 0.
1234 * - tx_free_thresh must be less than the size of the ring minus 3.
1235 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1237 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1238 * race condition, hence the maximum threshold constraints. When set
1239 * to zero use default values.
1241 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1242 tx_conf->tx_free_thresh :
1243 ICE_DEFAULT_TX_FREE_THRESH);
/* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1246 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1247 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1248 if (tx_conf->tx_rs_thresh)
1249 tx_rs_thresh = tx_conf->tx_rs_thresh;
1250 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1251 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1252 "exceed nb_desc. (tx_rs_thresh=%u "
1253 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1254 (unsigned int)tx_rs_thresh,
1255 (unsigned int)tx_free_thresh,
1256 (unsigned int)nb_desc,
1257 (int)dev->data->port_id,
1261 if (tx_rs_thresh >= (nb_desc - 2)) {
1262 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1263 "number of TX descriptors minus 2. "
1264 "(tx_rs_thresh=%u port=%d queue=%d)",
1265 (unsigned int)tx_rs_thresh,
1266 (int)dev->data->port_id,
1270 if (tx_free_thresh >= (nb_desc - 3)) {
PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
"number of TX descriptors minus 3. "
1274 "(tx_free_thresh=%u port=%d queue=%d)",
1275 (unsigned int)tx_free_thresh,
1276 (int)dev->data->port_id,
1280 if (tx_rs_thresh > tx_free_thresh) {
1281 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1282 "equal to tx_free_thresh. (tx_free_thresh=%u"
1283 " tx_rs_thresh=%u port=%d queue=%d)",
1284 (unsigned int)tx_free_thresh,
1285 (unsigned int)tx_rs_thresh,
1286 (int)dev->data->port_id,
1290 if ((nb_desc % tx_rs_thresh) != 0) {
1291 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1292 "number of TX descriptors. (tx_rs_thresh=%u"
1293 " port=%d queue=%d)",
1294 (unsigned int)tx_rs_thresh,
1295 (int)dev->data->port_id,
1299 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1300 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1301 "tx_rs_thresh is greater than 1. "
1302 "(tx_rs_thresh=%u port=%d queue=%d)",
1303 (unsigned int)tx_rs_thresh,
1304 (int)dev->data->port_id,
1309 /* Free memory if needed. */
1310 if (dev->data->tx_queues[queue_idx]) {
1311 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1312 dev->data->tx_queues[queue_idx] = NULL;
1315 /* Allocate the TX queue data structure. */
1316 txq = rte_zmalloc_socket(NULL,
1317 sizeof(struct ice_tx_queue),
1318 RTE_CACHE_LINE_SIZE,
1321 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1322 "tx queue structure");
1326 /* Allocate TX hardware ring descriptors. */
1327 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1328 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1329 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1330 ring_size, ICE_RING_BASE_ALIGN,
1333 ice_tx_queue_release(txq);
1334 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1338 txq->nb_tx_desc = nb_desc;
1339 txq->tx_rs_thresh = tx_rs_thresh;
1340 txq->tx_free_thresh = tx_free_thresh;
1341 txq->pthresh = tx_conf->tx_thresh.pthresh;
1342 txq->hthresh = tx_conf->tx_thresh.hthresh;
1343 txq->wthresh = tx_conf->tx_thresh.wthresh;
1344 txq->queue_id = queue_idx;
1346 txq->reg_idx = vsi->base_queue + queue_idx;
1347 txq->port_id = dev->data->port_id;
1348 txq->offloads = offloads;
1350 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1352 txq->tx_ring_dma = tz->iova;
1353 txq->tx_ring = tz->addr;
1355 /* Allocate software ring */
1357 rte_zmalloc_socket(NULL,
1358 sizeof(struct ice_tx_entry) * nb_desc,
1359 RTE_CACHE_LINE_SIZE,
1361 if (!txq->sw_ring) {
1362 ice_tx_queue_release(txq);
1363 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1367 ice_reset_tx_queue(txq);
1369 dev->data->tx_queues[queue_idx] = txq;
1370 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1371 ice_set_tx_function_flag(dev, txq);
1377 ice_tx_queue_release(void *txq)
1379 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1382 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1387 rte_free(q->sw_ring);
1392 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1393 struct rte_eth_rxq_info *qinfo)
1395 struct ice_rx_queue *rxq;
1397 rxq = dev->data->rx_queues[queue_id];
1399 qinfo->mp = rxq->mp;
1400 qinfo->scattered_rx = dev->data->scattered_rx;
1401 qinfo->nb_desc = rxq->nb_rx_desc;
1403 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1404 qinfo->conf.rx_drop_en = rxq->drop_en;
1405 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1409 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1410 struct rte_eth_txq_info *qinfo)
1412 struct ice_tx_queue *txq;
1414 txq = dev->data->tx_queues[queue_id];
1416 qinfo->nb_desc = txq->nb_tx_desc;
1418 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1419 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1420 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1422 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1423 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1424 qinfo->conf.offloads = txq->offloads;
1425 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
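/* Estimate the number of used Rx descriptors by walking the ring from
 * rx_tail and sampling the DD bit every ICE_RXQ_SCAN_INTERVAL (4)
 * descriptors.
 */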
1429 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1431 #define ICE_RXQ_SCAN_INTERVAL 4
1432 volatile union ice_rx_flex_desc *rxdp;
1433 struct ice_rx_queue *rxq;
1436 rxq = dev->data->rx_queues[rx_queue_id];
1437 rxdp = &rxq->rx_ring[rxq->rx_tail];
1438 while ((desc < rxq->nb_rx_desc) &&
1439 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1440 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
/* Check the DD bit of one Rx descriptor in each group of 4,
 * to avoid checking too frequently and degrading performance.
 */
1446 desc += ICE_RXQ_SCAN_INTERVAL;
1447 rxdp += ICE_RXQ_SCAN_INTERVAL;
1448 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1449 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1450 desc - rxq->nb_rx_desc]);
1456 #define ICE_RX_FLEX_ERR0_BITS \
1457 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1458 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1459 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1460 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1461 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1462 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1464 /* Rx L3/L4 checksum */
1465 static inline uint64_t
1466 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1470 /* check if HW has decoded the packet and checksum */
1471 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1474 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1475 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1479 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1480 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1482 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1484 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1485 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1487 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1489 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1490 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1492 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1493 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
1495 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
1501 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1503 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1504 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1505 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1507 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1508 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1509 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1514 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1515 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1516 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1517 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
1518 RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
1519 mb->vlan_tci_outer = mb->vlan_tci;
1520 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1521 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1522 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1523 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1525 mb->vlan_tci_outer = 0;
1528 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1529 mb->vlan_tci, mb->vlan_tci_outer);
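/* ice_rx_scan_hw_ring() below examines up to ICE_RX_MAX_BURST
 * descriptors in groups of ICE_LOOK_AHEAD (8), converts the completed
 * ones into mbufs and parks them in rx_stage for later delivery.
 */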
1532 #define ICE_LOOK_AHEAD 8
1533 #if (ICE_LOOK_AHEAD != 8)
1534 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1537 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1539 volatile union ice_rx_flex_desc *rxdp;
1540 struct ice_rx_entry *rxep;
1541 struct rte_mbuf *mb;
1544 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1545 int32_t i, j, nb_rx = 0;
1546 uint64_t pkt_flags = 0;
1547 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1549 rxdp = &rxq->rx_ring[rxq->rx_tail];
1550 rxep = &rxq->sw_ring[rxq->rx_tail];
1552 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1554 /* Make sure there is at least 1 packet to receive */
1555 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
/* Scan LOOK_AHEAD descriptors at a time to determine which
 * descriptors reference packets that are ready to be received.
 */
1562 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1563 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1564 /* Read desc statuses backwards to avoid race condition */
1565 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1566 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1570 /* Compute how many status bits were set */
1571 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1572 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1576 /* Translate descriptor info to mbuf parameters */
1577 for (j = 0; j < nb_dd; j++) {
1579 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1580 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1581 mb->data_len = pkt_len;
1582 mb->pkt_len = pkt_len;
1584 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1585 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1586 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1587 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1588 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1589 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1591 mb->ol_flags |= pkt_flags;
1594 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1595 rxq->rx_stage[i + j] = rxep[j].mbuf;
1597 if (nb_dd != ICE_LOOK_AHEAD)
1601 /* Clear software ring entries */
1602 for (i = 0; i < nb_rx; i++)
1603 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1605 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1606 "port_id=%u, queue_id=%u, nb_rx=%d",
1607 rxq->port_id, rxq->queue_id, nb_rx);
1612 static inline uint16_t
1613 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1614 struct rte_mbuf **rx_pkts,
1618 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1620 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1622 for (i = 0; i < nb_pkts; i++)
1623 rx_pkts[i] = stage[i];
1625 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1626 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
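/* Refill rx_free_thresh descriptors starting just past the previous free
 * trigger: bulk-get mbufs from the pool, write their DMA addresses into
 * the ring, then advance the tail register and the trigger.
 */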
1632 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1634 volatile union ice_rx_flex_desc *rxdp;
1635 struct ice_rx_entry *rxep;
1636 struct rte_mbuf *mb;
1637 uint16_t alloc_idx, i;
1641 /* Allocate buffers in bulk */
1642 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1643 (rxq->rx_free_thresh - 1));
1644 rxep = &rxq->sw_ring[alloc_idx];
1645 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1646 rxq->rx_free_thresh);
1647 if (unlikely(diag != 0)) {
1648 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1652 rxdp = &rxq->rx_ring[alloc_idx];
1653 for (i = 0; i < rxq->rx_free_thresh; i++) {
1654 if (likely(i < (rxq->rx_free_thresh - 1)))
1655 /* Prefetch next mbuf */
1656 rte_prefetch0(rxep[i + 1].mbuf);
1659 rte_mbuf_refcnt_set(mb, 1);
1661 mb->data_off = RTE_PKTMBUF_HEADROOM;
1663 mb->port = rxq->port_id;
1664 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1665 rxdp[i].read.hdr_addr = 0;
1666 rxdp[i].read.pkt_addr = dma_addr;
/* Update Rx tail register */
1670 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1672 rxq->rx_free_trigger =
1673 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1674 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1675 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
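/* Bulk-alloc receive path: serve packets from rx_stage first; when the
 * stage is empty, scan the hardware ring, refill buffers once the free
 * trigger is passed, and roll the scan back if the refill fails.
 */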
1680 static inline uint16_t
1681 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1683 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1689 if (rxq->rx_nb_avail)
1690 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1692 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1693 rxq->rx_next_avail = 0;
1694 rxq->rx_nb_avail = nb_rx;
1695 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1697 if (rxq->rx_tail > rxq->rx_free_trigger) {
1698 if (ice_rx_alloc_bufs(rxq) != 0) {
1701 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1702 rxq->rx_free_thresh;
1703 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1704 "port_id=%u, queue_id=%u",
1705 rxq->port_id, rxq->queue_id);
1706 rxq->rx_nb_avail = 0;
1707 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1708 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1709 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1715 if (rxq->rx_tail >= rxq->nb_rx_desc)
1718 if (rxq->rx_nb_avail)
1719 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1725 ice_recv_pkts_bulk_alloc(void *rx_queue,
1726 struct rte_mbuf **rx_pkts,
1733 if (unlikely(nb_pkts == 0))
1736 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1737 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1740 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1741 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1742 nb_rx = (uint16_t)(nb_rx + count);
1743 nb_pkts = (uint16_t)(nb_pkts - count);
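/* Scatter-capable receive: multi-descriptor packets are chained through
 * pkt_first_seg/pkt_last_seg across calls until the EOF bit is seen, and
 * CRC trimming and metadata extraction are applied to the first segment.
 */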
1752 ice_recv_scattered_pkts(void *rx_queue,
1753 struct rte_mbuf **rx_pkts,
1756 struct ice_rx_queue *rxq = rx_queue;
1757 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1758 volatile union ice_rx_flex_desc *rxdp;
1759 union ice_rx_flex_desc rxd;
1760 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1761 struct ice_rx_entry *rxe;
1762 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1763 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
struct rte_mbuf *nmb; /* newly allocated mbuf */
1765 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1766 uint16_t rx_id = rxq->rx_tail;
1768 uint16_t nb_hold = 0;
1769 uint16_t rx_packet_len;
1770 uint16_t rx_stat_err0;
1773 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1775 while (nb_rx < nb_pkts) {
1776 rxdp = &rx_ring[rx_id];
1777 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1779 /* Check the DD bit first */
1780 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1784 nmb = rte_mbuf_raw_alloc(rxq->mp);
1785 if (unlikely(!nmb)) {
1786 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
1789 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1792 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1794 if (unlikely(rx_id == rxq->nb_rx_desc))
1797 /* Prefetch next mbuf */
1798 rte_prefetch0(sw_ring[rx_id].mbuf);
/* When the next RX descriptor is on a cache line boundary,
 * prefetch the next 4 RX descriptors and the next 8 pointers
 * to mbufs.
 */
1805 if ((rx_id & 0x3) == 0) {
1806 rte_prefetch0(&rx_ring[rx_id]);
1807 rte_prefetch0(&sw_ring[rx_id]);
1813 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1815 /* Set data buffer address and data length of the mbuf */
1816 rxdp->read.hdr_addr = 0;
1817 rxdp->read.pkt_addr = dma_addr;
1818 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1819 ICE_RX_FLX_DESC_PKT_LEN_M;
1820 rxm->data_len = rx_packet_len;
1821 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1824 * If this is the first buffer of the received packet, set the
1825 * pointer to the first mbuf of the packet and initialize its
1826 * context. Otherwise, update the total length and the number
1827 * of segments of the current scattered packet, and update the
1828 * pointer to the last mbuf of the current packet.
1832 first_seg->nb_segs = 1;
1833 first_seg->pkt_len = rx_packet_len;
1835 first_seg->pkt_len =
1836 (uint16_t)(first_seg->pkt_len +
1838 first_seg->nb_segs++;
1839 last_seg->next = rxm;
1843 * If this is not the last buffer of the received packet,
1844 * update the pointer to the last mbuf of the current scattered
1845 * packet and continue to parse the RX ring.
1847 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
/* This is the last buffer of the received packet. If the CRC
 * is not stripped by the hardware:
 *  - Subtract the CRC length from the total packet length.
 *  - If the last buffer only contains the whole CRC or a part
 *    of it, free the mbuf associated to the last buffer. If part
 *    of the CRC is also contained in the previous mbuf, subtract
 *    the length of that CRC part from the data length of the
 *    previous mbuf.
 */
1863 if (unlikely(rxq->crc_len > 0)) {
1864 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1865 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1866 rte_pktmbuf_free_seg(rxm);
1867 first_seg->nb_segs--;
1868 last_seg->data_len =
1869 (uint16_t)(last_seg->data_len -
1870 (RTE_ETHER_CRC_LEN - rx_packet_len));
1871 last_seg->next = NULL;
1873 rxm->data_len = (uint16_t)(rx_packet_len -
1877 first_seg->port = rxq->port_id;
1878 first_seg->ol_flags = 0;
1879 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1880 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1881 ice_rxd_to_vlan_tci(first_seg, &rxd);
1882 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1883 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1884 first_seg->ol_flags |= pkt_flags;
1885 /* Prefetch data of first segment, if configured to do so. */
1886 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1887 first_seg->data_off));
1888 rx_pkts[nb_rx++] = first_seg;
1892 /* Record index of the next RX descriptor to probe. */
1893 rxq->rx_tail = rx_id;
1894 rxq->pkt_first_seg = first_seg;
1895 rxq->pkt_last_seg = last_seg;
/* If the number of free RX descriptors is greater than the RX free
 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
 * register. Update the RDT with the value of the last processed RX
 * descriptor minus 1, to guarantee that the RDT register is never
 * equal to the RDH register, which creates a "full" ring situation
 * from the hardware point of view.
 */
1905 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1906 if (nb_hold > rxq->rx_free_thresh) {
1907 rx_id = (uint16_t)(rx_id == 0 ?
1908 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1909 /* write TAIL register */
1910 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1913 rxq->nb_rx_hold = nb_hold;
1915 /* return received packet in the burst */
1920 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1922 struct ice_adapter *ad =
1923 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1924 const uint32_t *ptypes;
1926 static const uint32_t ptypes_os[] = {
1927 /* refers to ice_get_default_pkt_type() */
1929 RTE_PTYPE_L2_ETHER_TIMESYNC,
1930 RTE_PTYPE_L2_ETHER_LLDP,
1931 RTE_PTYPE_L2_ETHER_ARP,
1932 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1933 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1936 RTE_PTYPE_L4_NONFRAG,
1940 RTE_PTYPE_TUNNEL_GRENAT,
1941 RTE_PTYPE_TUNNEL_IP,
1942 RTE_PTYPE_INNER_L2_ETHER,
1943 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1944 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1945 RTE_PTYPE_INNER_L4_FRAG,
1946 RTE_PTYPE_INNER_L4_ICMP,
1947 RTE_PTYPE_INNER_L4_NONFRAG,
1948 RTE_PTYPE_INNER_L4_SCTP,
1949 RTE_PTYPE_INNER_L4_TCP,
1950 RTE_PTYPE_INNER_L4_UDP,
1954 static const uint32_t ptypes_comms[] = {
1955 /* refers to ice_get_default_pkt_type() */
1957 RTE_PTYPE_L2_ETHER_TIMESYNC,
1958 RTE_PTYPE_L2_ETHER_LLDP,
1959 RTE_PTYPE_L2_ETHER_ARP,
1960 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1961 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1964 RTE_PTYPE_L4_NONFRAG,
1968 RTE_PTYPE_TUNNEL_GRENAT,
1969 RTE_PTYPE_TUNNEL_IP,
1970 RTE_PTYPE_INNER_L2_ETHER,
1971 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1972 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1973 RTE_PTYPE_INNER_L4_FRAG,
1974 RTE_PTYPE_INNER_L4_ICMP,
1975 RTE_PTYPE_INNER_L4_NONFRAG,
1976 RTE_PTYPE_INNER_L4_SCTP,
1977 RTE_PTYPE_INNER_L4_TCP,
1978 RTE_PTYPE_INNER_L4_UDP,
1979 RTE_PTYPE_TUNNEL_GTPC,
1980 RTE_PTYPE_TUNNEL_GTPU,
1981 RTE_PTYPE_L2_ETHER_PPPOE,
1985 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1986 ptypes = ptypes_comms;
1990 if (dev->rx_pkt_burst == ice_recv_pkts ||
1991 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1992 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1996 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1997 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1998 #ifdef CC_AVX512_SUPPORT
1999 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2000 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2001 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2002 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2004 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2005 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2006 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2007 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2015 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2017 volatile union ice_rx_flex_desc *rxdp;
2018 struct ice_rx_queue *rxq = rx_queue;
2021 if (unlikely(offset >= rxq->nb_rx_desc))
2024 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2025 return RTE_ETH_RX_DESC_UNAVAIL;
2027 desc = rxq->rx_tail + offset;
2028 if (desc >= rxq->nb_rx_desc)
2029 desc -= rxq->nb_rx_desc;
2031 rxdp = &rxq->rx_ring[desc];
2032 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2033 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2034 return RTE_ETH_RX_DESC_DONE;
2036 return RTE_ETH_RX_DESC_AVAIL;
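/* Only descriptors on tx_rs_thresh boundaries request writeback, so the
 * offset is rounded up to the next RS boundary before the DTYPE field is
 * checked for the DONE value.
 */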
2040 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2042 struct ice_tx_queue *txq = tx_queue;
2043 volatile uint64_t *status;
2044 uint64_t mask, expect;
2047 if (unlikely(offset >= txq->nb_tx_desc))
2050 desc = txq->tx_tail + offset;
2051 /* go to next desc that has the RS bit */
2052 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2054 if (desc >= txq->nb_tx_desc) {
2055 desc -= txq->nb_tx_desc;
2056 if (desc >= txq->nb_tx_desc)
2057 desc -= txq->nb_tx_desc;
2060 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2061 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2062 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2063 ICE_TXD_QW1_DTYPE_S);
2064 if ((*status & mask) == expect)
2065 return RTE_ETH_TX_DESC_DONE;
2067 return RTE_ETH_TX_DESC_FULL;
2071 ice_free_queues(struct rte_eth_dev *dev)
2075 PMD_INIT_FUNC_TRACE();
2077 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2078 if (!dev->data->rx_queues[i])
2080 ice_rx_queue_release(dev->data->rx_queues[i]);
2081 dev->data->rx_queues[i] = NULL;
2082 rte_eth_dma_zone_free(dev, "rx_ring", i);
2084 dev->data->nb_rx_queues = 0;
2086 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2087 if (!dev->data->tx_queues[i])
2089 ice_tx_queue_release(dev->data->tx_queues[i]);
2090 dev->data->tx_queues[i] = NULL;
2091 rte_eth_dma_zone_free(dev, "tx_ring", i);
2093 dev->data->nb_tx_queues = 0;
2096 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2097 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
2100 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2102 struct ice_tx_queue *txq;
2103 const struct rte_memzone *tz = NULL;
2105 struct rte_eth_dev *dev;
2108 PMD_DRV_LOG(ERR, "PF is not available");
2112 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2114 /* Allocate the TX queue data structure. */
2115 txq = rte_zmalloc_socket("ice fdir tx queue",
2116 sizeof(struct ice_tx_queue),
2117 RTE_CACHE_LINE_SIZE,
2120 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2121 "tx queue structure.");
2125 /* Allocate TX hardware ring descriptors. */
2126 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2127 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2129 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2130 ICE_FDIR_QUEUE_ID, ring_size,
2131 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2133 ice_tx_queue_release(txq);
2134 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2138 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2139 txq->queue_id = ICE_FDIR_QUEUE_ID;
2140 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2141 txq->vsi = pf->fdir.fdir_vsi;
2143 txq->tx_ring_dma = tz->iova;
2144 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
/* Don't need to allocate a software ring or reset the queue for the
 * FDIR program queue; just mark the queue as configured.
 */
2152 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2158 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2160 struct ice_rx_queue *rxq;
2161 const struct rte_memzone *rz = NULL;
2163 struct rte_eth_dev *dev;
2166 PMD_DRV_LOG(ERR, "PF is not available");
2170 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2172 /* Allocate the RX queue data structure. */
2173 rxq = rte_zmalloc_socket("ice fdir rx queue",
2174 sizeof(struct ice_rx_queue),
2175 RTE_CACHE_LINE_SIZE,
2178 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2179 "rx queue structure.");
2183 /* Allocate RX hardware ring descriptors. */
2184 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2185 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2187 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2188 ICE_FDIR_QUEUE_ID, ring_size,
2189 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2191 ice_rx_queue_release(rxq);
2192 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2196 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2197 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2198 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2199 rxq->vsi = pf->fdir.fdir_vsi;
2201 rxq->rx_ring_dma = rz->iova;
2202 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2203 sizeof(union ice_32byte_rx_desc));
2204 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
/* Don't need to allocate a software ring or reset the queue for the
 * FDIR Rx queue; just mark the queue as configured.
 */
2213 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
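/* Default scalar receive: one descriptor per packet (no scatter); the
 * completed mbuf is handed to the caller and a freshly allocated one
 * takes its place in the ring.
 */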
2219 ice_recv_pkts(void *rx_queue,
2220 struct rte_mbuf **rx_pkts,
2223 struct ice_rx_queue *rxq = rx_queue;
2224 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2225 volatile union ice_rx_flex_desc *rxdp;
2226 union ice_rx_flex_desc rxd;
2227 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2228 struct ice_rx_entry *rxe;
2229 struct rte_mbuf *nmb; /* newly allocated mbuf */
2230 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2231 uint16_t rx_id = rxq->rx_tail;
2233 uint16_t nb_hold = 0;
2234 uint16_t rx_packet_len;
2235 uint16_t rx_stat_err0;
2238 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2240 while (nb_rx < nb_pkts) {
2241 rxdp = &rx_ring[rx_id];
2242 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2244 /* Check the DD bit first */
2245 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2249 nmb = rte_mbuf_raw_alloc(rxq->mp);
2250 if (unlikely(!nmb)) {
2251 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2254 rxd = *rxdp; /* copy the descriptor in the ring to a temp variable */
2257 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2259 if (unlikely(rx_id == rxq->nb_rx_desc))
2264 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2267 * fill the read format of the descriptor with the physical
2268 * address of the newly allocated mbuf: nmb
2270 rxdp->read.hdr_addr = 0;
2271 rxdp->read.pkt_addr = dma_addr;
2273 /* calculate rx_packet_len of the received pkt */
2274 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2275 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2277 /* fill old mbuf with received descriptor: rxd */
2278 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2279 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2282 rxm->pkt_len = rx_packet_len;
2283 rxm->data_len = rx_packet_len;
2284 rxm->port = rxq->port_id;
2285 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2286 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
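		/*
		 * The hardware packet type from the flex descriptor (masked
		 * by ICE_RX_FLEX_DESC_PTYPE_M) indexes the per-adapter
		 * ptype_tbl, which is filled by ice_set_default_ptype_table()
		 * further down in this file.
		 */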
2287 ice_rxd_to_vlan_tci(rxm, &rxd);
2288 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2289 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2290 rxm->ol_flags |= pkt_flags;
2291 /* copy old mbuf to rx_pkts */
2292 rx_pkts[nb_rx++] = rxm;
2294 rxq->rx_tail = rx_id;
2296 * If the number of free RX descriptors is greater than the RX free
2297 * threshold of the queue, advance the receive tail register of the
2298 * queue. Update that register with the value of the last processed
2299 * RX descriptor minus 1.
2301 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2302 if (nb_hold > rxq->rx_free_thresh) {
2303 rx_id = (uint16_t)(rx_id == 0 ?
2304 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2305 /* write TAIL register */
2306 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2309 rxq->nb_rx_hold = nb_hold;
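	/*
	 * Tail updates are batched: the register is only written once more
	 * than rx_free_thresh descriptors have been refilled. For example,
	 * with rx_free_thresh = 32 this amounts to roughly one MMIO write
	 * per 32 received packets instead of one per packet.
	 */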
2311 /* return the number of packets received in this burst */
2316 ice_parse_tunneling_params(uint64_t ol_flags,
2317 union ice_tx_offload tx_offload,
2318 uint32_t *cd_tunneling)
2320 /* EIPT: External (outer) IP header type */
2321 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
2322 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2323 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
2324 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2325 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
2326 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2328 /* EIPLEN: External (outer) IP header length, in DWords */
2329 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2330 ICE_TXD_CTX_QW0_EIPLEN_S;
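	/*
	 * outer_l3_len is in bytes while the EIPLEN field is in 4-byte
	 * DWords, hence the >> 2: e.g. a 20-byte outer IPv4 header without
	 * options is encoded as 5.
	 */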
2332 /* L4TUNT: L4 Tunneling Type */
2333 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2334 case RTE_MBUF_F_TX_TUNNEL_IPIP:
2335 /* for non UDP / GRE tunneling, set to 00b */
2337 case RTE_MBUF_F_TX_TUNNEL_VXLAN:
2338 case RTE_MBUF_F_TX_TUNNEL_GTP:
2339 case RTE_MBUF_F_TX_TUNNEL_GENEVE:
2340 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2342 case RTE_MBUF_F_TX_TUNNEL_GRE:
2343 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2346 PMD_TX_LOG(ERR, "Tunnel type not supported");
2350 /* L4TUNLEN: L4 Tunneling Length, in Words
2352 * We depend on the application to set rte_mbuf.l2_len correctly.
2353 * For IP in GRE it should be set to the length of the GRE header.
2355 * For MAC in GRE or MAC in UDP it should be set to the length
2356 * of the GRE or UDP headers plus the inner MAC up to and including
2357 * its last Ethertype.
2358 * If MPLS labels exist, they should be included as well.
2360 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2361 ICE_TXD_CTX_QW0_NATLEN_S;
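	/*
	 * l2_len is in bytes while the NATLEN field is in 2-byte words,
	 * hence the >> 1. As an illustration, for MAC in UDP (e.g. VXLAN)
	 * it covers the outer UDP + VXLAN headers plus the inner Ethernet
	 * header: 8 + 8 + 14 = 30 bytes, i.e. a field value of 15.
	 */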
2364 * Calculate the tunneling UDP checksum.
2365 * Shall be set only if L4TUNT = 01b and EIPT is not zero
2367 if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2368 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2369 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2373 ice_txd_enable_checksum(uint64_t ol_flags,
2375 uint32_t *td_offset,
2376 union ice_tx_offload tx_offload)
2379 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2380 *td_offset |= (tx_offload.outer_l2_len >> 1)
2381 << ICE_TX_DESC_LEN_MACLEN_S;
2383 *td_offset |= (tx_offload.l2_len >> 1)
2384 << ICE_TX_DESC_LEN_MACLEN_S;
2386 /* Enable L3 checksum offloads */
2387 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2388 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2389 *td_offset |= (tx_offload.l3_len >> 2) <<
2390 ICE_TX_DESC_LEN_IPLEN_S;
2391 } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2392 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2393 *td_offset |= (tx_offload.l3_len >> 2) <<
2394 ICE_TX_DESC_LEN_IPLEN_S;
2395 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2396 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2397 *td_offset |= (tx_offload.l3_len >> 2) <<
2398 ICE_TX_DESC_LEN_IPLEN_S;
2401 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2402 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2403 *td_offset |= (tx_offload.l4_len >> 2) <<
2404 ICE_TX_DESC_LEN_L4_LEN_S;
2408 /* Enable L4 checksum offloads */
2409 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2410 case RTE_MBUF_F_TX_TCP_CKSUM:
2411 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2412 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2413 ICE_TX_DESC_LEN_L4_LEN_S;
2415 case RTE_MBUF_F_TX_SCTP_CKSUM:
2416 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2417 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2418 ICE_TX_DESC_LEN_L4_LEN_S;
2420 case RTE_MBUF_F_TX_UDP_CKSUM:
2421 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2422 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2423 ICE_TX_DESC_LEN_L4_LEN_S;
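	/*
	 * The L4_LEN field is in 4-byte DWords, so the fixed header sizes
	 * above encode as 5 (20-byte TCP), 3 (12-byte SCTP) and 2 (8-byte
	 * UDP).
	 */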
2431 ice_xmit_cleanup(struct ice_tx_queue *txq)
2433 struct ice_tx_entry *sw_ring = txq->sw_ring;
2434 volatile struct ice_tx_desc *txd = txq->tx_ring;
2435 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2436 uint16_t nb_tx_desc = txq->nb_tx_desc;
2437 uint16_t desc_to_clean_to;
2438 uint16_t nb_tx_to_clean;
2440 /* Determine the last descriptor needing to be cleaned */
2441 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2442 if (desc_to_clean_to >= nb_tx_desc)
2443 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2445 /* Check to make sure the last descriptor to clean is done */
2446 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2447 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2448 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2449 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2450 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2452 txq->port_id, txq->queue_id,
2453 txd[desc_to_clean_to].cmd_type_offset_bsz);
2454 /* Failed to clean any descriptors */
2458 /* Figure out how many descriptors will be cleaned */
2459 if (last_desc_cleaned > desc_to_clean_to)
2460 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2463 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2466 /* The last descriptor to clean is done, so that means all the
2467 * descriptors from the last descriptor that was cleaned
2468 * up to the last descriptor with the RS bit set
2469 * are done. Only reset the threshold descriptor.
2471 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2473 /* Update the txq to reflect the last descriptor that was cleaned */
2474 txq->last_desc_cleaned = desc_to_clean_to;
2475 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2480 /* Construct the tx flags */
2481 static inline uint64_t
2482 ice_build_ctob(uint32_t td_cmd,
2487 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2488 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2489 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2490 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2491 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2494 /* Check if the context descriptor is needed for TX offloading */
2495 static inline uint16_t
2496 ice_calc_context_desc(uint64_t flags)
2498 static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
2499 RTE_MBUF_F_TX_QINQ |
2500 RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2501 RTE_MBUF_F_TX_TUNNEL_MASK;
2503 return (flags & mask) ? 1 : 0;
2506 /* set ice TSO context descriptor */
2507 static inline uint64_t
2508 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2510 uint64_t ctx_desc = 0;
2511 uint32_t cd_cmd, hdr_len, cd_tso_len;
2513 if (!tx_offload.l4_len) {
2514 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2518 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2519 hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
2520 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2522 cd_cmd = ICE_TX_CTX_DESC_TSO;
2523 cd_tso_len = mbuf->pkt_len - hdr_len;
2524 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2525 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2526 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
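	/*
	 * Example: a 9014-byte TSO mbuf with 14 + 20 + 20 = 54 bytes of
	 * headers yields cd_tso_len = 8960; the hardware then segments the
	 * payload into tso_segsz-byte chunks, replicating the headers for
	 * each resulting segment.
	 */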
2531 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2532 #define ICE_MAX_DATA_PER_TXD \
2533 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
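/*
 * ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S evaluates to 0x3FFF,
 * i.e. 16383 bytes, matching the (16K-1)B limit noted above.
 */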
2534 /* Calculate the number of TX descriptors needed for each pkt */
2535 static inline uint16_t
2536 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2538 struct rte_mbuf *txd = tx_pkt;
2541 while (txd != NULL) {
2542 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
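		/*
		 * e.g. a single 20000-byte segment needs
		 * DIV_ROUND_UP(20000, 16383) = 2 data descriptors.
		 */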
2550 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2552 struct ice_tx_queue *txq;
2553 volatile struct ice_tx_desc *tx_ring;
2554 volatile struct ice_tx_desc *txd;
2555 struct ice_tx_entry *sw_ring;
2556 struct ice_tx_entry *txe, *txn;
2557 struct rte_mbuf *tx_pkt;
2558 struct rte_mbuf *m_seg;
2559 uint32_t cd_tunneling_params;
2564 uint32_t td_cmd = 0;
2565 uint32_t td_offset = 0;
2566 uint32_t td_tag = 0;
2569 uint64_t buf_dma_addr;
2571 union ice_tx_offload tx_offload = {0};
2574 sw_ring = txq->sw_ring;
2575 tx_ring = txq->tx_ring;
2576 tx_id = txq->tx_tail;
2577 txe = &sw_ring[tx_id];
2579 /* Check if the descriptor ring needs to be cleaned. */
2580 if (txq->nb_tx_free < txq->tx_free_thresh)
2581 (void)ice_xmit_cleanup(txq);
2583 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2584 tx_pkt = *tx_pkts++;
2589 ol_flags = tx_pkt->ol_flags;
2590 tx_offload.l2_len = tx_pkt->l2_len;
2591 tx_offload.l3_len = tx_pkt->l3_len;
2592 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2593 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2594 tx_offload.l4_len = tx_pkt->l4_len;
2595 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2596 /* Calculate the number of context descriptors needed. */
2597 nb_ctx = ice_calc_context_desc(ol_flags);
2599 /* The number of descriptors that must be allocated for
2600 * a packet equals the number of segments of that packet
2601 * plus one context descriptor, if needed.
2602 * Recalculate the needed Tx descriptors when TSO is enabled,
2603 * in case the mbuf data size exceeds the maximum the HW allows
2606 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
2607 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2610 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2611 tx_last = (uint16_t)(tx_id + nb_used - 1);
2614 if (tx_last >= txq->nb_tx_desc)
2615 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2617 if (nb_used > txq->nb_tx_free) {
2618 if (ice_xmit_cleanup(txq) != 0) {
2623 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2624 while (nb_used > txq->nb_tx_free) {
2625 if (ice_xmit_cleanup(txq) != 0) {
2634 /* Descriptor based VLAN insertion */
2635 if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
2636 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2637 td_tag = tx_pkt->vlan_tci;
2640 /* Fill in tunneling parameters if necessary */
2641 cd_tunneling_params = 0;
2642 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2643 ice_parse_tunneling_params(ol_flags, tx_offload,
2644 &cd_tunneling_params);
2646 /* Enable checksum offloading */
2647 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2648 ice_txd_enable_checksum(ol_flags, &td_cmd,
2649 &td_offset, tx_offload);
2652 /* Setup TX context descriptor if required */
2653 volatile struct ice_tx_ctx_desc *ctx_txd =
2654 (volatile struct ice_tx_ctx_desc *)
2656 uint16_t cd_l2tag2 = 0;
2657 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2659 txn = &sw_ring[txe->next_id];
2660 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2662 rte_pktmbuf_free_seg(txe->mbuf);
2666 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
2667 cd_type_cmd_tso_mss |=
2668 ice_set_tso_ctx(tx_pkt, tx_offload);
2670 ctx_txd->tunneling_params =
2671 rte_cpu_to_le_32(cd_tunneling_params);
2673 /* TX context descriptor based double VLAN insert */
2674 if (ol_flags & RTE_MBUF_F_TX_QINQ) {
2675 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2676 cd_type_cmd_tso_mss |=
2677 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2678 ICE_TXD_CTX_QW1_CMD_S);
2680 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2682 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2684 txe->last_id = tx_last;
2685 tx_id = txe->next_id;
2691 txd = &tx_ring[tx_id];
2692 txn = &sw_ring[txe->next_id];
2695 rte_pktmbuf_free_seg(txe->mbuf);
2698 /* Setup TX Descriptor */
2699 slen = m_seg->data_len;
2700 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2702 while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
2703 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2704 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2705 txd->cmd_type_offset_bsz =
2706 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2707 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2708 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2709 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2710 ICE_TXD_QW1_TX_BUF_SZ_S) |
2711 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2713 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2714 slen -= ICE_MAX_DATA_PER_TXD;
2716 txe->last_id = tx_last;
2717 tx_id = txe->next_id;
2719 txd = &tx_ring[tx_id];
2720 txn = &sw_ring[txe->next_id];
2723 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2724 txd->cmd_type_offset_bsz =
2725 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2726 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2727 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2728 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2729 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2731 txe->last_id = tx_last;
2732 tx_id = txe->next_id;
2734 m_seg = m_seg->next;
2737 /* fill the last descriptor with End of Packet (EOP) bit */
2738 td_cmd |= ICE_TX_DESC_CMD_EOP;
2739 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2740 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2742 /* set RS bit on the last descriptor of one packet */
2743 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2745 "Setting RS bit on TXD id="
2746 "%4u (port=%d queue=%d)",
2747 tx_last, txq->port_id, txq->queue_id);
2749 td_cmd |= ICE_TX_DESC_CMD_RS;
2751 /* Update txq RS bit counters */
2752 txq->nb_tx_used = 0;
2754 txd->cmd_type_offset_bsz |=
2755 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2759 /* update Tail register */
2760 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2761 txq->tx_tail = tx_id;
2766 static __rte_always_inline int
2767 ice_tx_free_bufs(struct ice_tx_queue *txq)
2769 struct ice_tx_entry *txep;
2772 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2773 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2774 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2777 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2779 for (i = 0; i < txq->tx_rs_thresh; i++)
2780 rte_prefetch0((txep + i)->mbuf);
2782 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2783 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2784 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2788 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2789 rte_pktmbuf_free_seg(txep->mbuf);
2794 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2795 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2796 if (txq->tx_next_dd >= txq->nb_tx_desc)
2797 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2799 return txq->tx_rs_thresh;
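/*
 * Note on the fast-free branch above: with DEV_TX_OFFLOAD_MBUF_FAST_FREE
 * the application guarantees that all mbufs on this queue come from the
 * same mempool and have a reference count of one, so they can be returned
 * directly with rte_mempool_put() instead of the slower
 * rte_pktmbuf_free_seg() path.
 */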
2803 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2806 struct ice_tx_entry *swr_ring = txq->sw_ring;
2807 uint16_t i, tx_last, tx_id;
2808 uint16_t nb_tx_free_last;
2809 uint16_t nb_tx_to_clean;
2812 /* Start freeing mbufs from the entry following tx_tail */
2813 tx_last = txq->tx_tail;
2814 tx_id = swr_ring[tx_last].next_id;
2816 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2819 nb_tx_to_clean = txq->nb_tx_free;
2820 nb_tx_free_last = txq->nb_tx_free;
2822 free_cnt = txq->nb_tx_desc;
2824 /* Loop through swr_ring to count the number of
2825 * freeable mbufs and packets.
2827 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2828 for (i = 0; i < nb_tx_to_clean &&
2829 pkt_cnt < free_cnt &&
2830 tx_id != tx_last; i++) {
2831 if (swr_ring[tx_id].mbuf != NULL) {
2832 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2833 swr_ring[tx_id].mbuf = NULL;
2836 * if this is the last segment in the packet,
2837 * increment the packet count
2839 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2842 tx_id = swr_ring[tx_id].next_id;
2845 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2846 txq->nb_tx_free || tx_id == tx_last)
2849 if (pkt_cnt < free_cnt) {
2850 if (ice_xmit_cleanup(txq))
2853 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2854 nb_tx_free_last = txq->nb_tx_free;
2858 return (int)pkt_cnt;
2863 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2864 uint32_t free_cnt __rte_unused)
2871 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2876 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2877 free_cnt = txq->nb_tx_desc;
2879 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2881 for (i = 0; i < cnt; i += n) {
2882 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2885 n = ice_tx_free_bufs(txq);
2895 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2897 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2898 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2899 struct ice_adapter *ad =
2900 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2903 if (ad->tx_vec_allowed)
2904 return ice_tx_done_cleanup_vec(q, free_cnt);
2906 if (ad->tx_simple_allowed)
2907 return ice_tx_done_cleanup_simple(q, free_cnt);
2909 return ice_tx_done_cleanup_full(q, free_cnt);
2912 /* Populate 4 descriptors with data from 4 mbufs */
2914 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2919 for (i = 0; i < 4; i++, txdp++, pkts++) {
2920 dma_addr = rte_mbuf_data_iova(*pkts);
2921 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2922 txdp->cmd_type_offset_bsz =
2923 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2924 (*pkts)->data_len, 0);
2928 /* Populate 1 descriptor with data from 1 mbuf */
2930 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2934 dma_addr = rte_mbuf_data_iova(*pkts);
2935 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2936 txdp->cmd_type_offset_bsz =
2937 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2938 (*pkts)->data_len, 0);
2942 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2945 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2946 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2947 const int N_PER_LOOP = 4;
2948 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2949 int mainpart, leftover;
2953 * Process most of the packets in chunks of N pkts. Any
2954 * leftover packets will get processed one at a time.
2956 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2957 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
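	/*
	 * Example: for nb_pkts = 10, mainpart = 8 (handled by two tx4()
	 * calls) and leftover = 2 (handled by two tx1() calls).
	 */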
2958 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2959 /* Copy N mbuf pointers to the S/W ring */
2960 for (j = 0; j < N_PER_LOOP; ++j)
2961 (txep + i + j)->mbuf = *(pkts + i + j);
2962 tx4(txdp + i, pkts + i);
2965 if (unlikely(leftover > 0)) {
2966 for (i = 0; i < leftover; ++i) {
2967 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2968 tx1(txdp + mainpart + i, pkts + mainpart + i);
2973 static inline uint16_t
2974 tx_xmit_pkts(struct ice_tx_queue *txq,
2975 struct rte_mbuf **tx_pkts,
2978 volatile struct ice_tx_desc *txr = txq->tx_ring;
2982 * Begin scanning the H/W ring for done descriptors when the number
2983 * of available descriptors drops below tx_free_thresh. For each done
2984 * descriptor, free the associated buffer.
2986 if (txq->nb_tx_free < txq->tx_free_thresh)
2987 ice_tx_free_bufs(txq);
2989 /* Use only the available descriptors */
2990 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2991 if (unlikely(!nb_pkts))
2994 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2995 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2996 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2997 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2998 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2999 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3001 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3005 /* Fill hardware descriptor ring with mbuf data */
3006 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3007 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3009 /* Determine if the RS bit needs to be set */
3010 if (txq->tx_tail > txq->tx_next_rs) {
3011 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3012 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3015 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3016 if (txq->tx_next_rs >= txq->nb_tx_desc)
3017 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
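	/*
	 * The RS bit is requested only once every tx_rs_thresh descriptors
	 * (at tx_next_rs), which limits how often the hardware writes back
	 * descriptor completion status.
	 */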
3020 if (txq->tx_tail >= txq->nb_tx_desc)
3023 /* Update the tx tail register */
3024 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3030 ice_xmit_pkts_simple(void *tx_queue,
3031 struct rte_mbuf **tx_pkts,
3036 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3037 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3041 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3044 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3045 &tx_pkts[nb_tx], num);
3046 nb_tx = (uint16_t)(nb_tx + ret);
3047 nb_pkts = (uint16_t)(nb_pkts - ret);
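	/*
	 * Bursts larger than ICE_TX_MAX_BURST are split and transmitted in
	 * ICE_TX_MAX_BURST-sized chunks by the loop above.
	 */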
3056 ice_set_rx_function(struct rte_eth_dev *dev)
3058 PMD_INIT_FUNC_TRACE();
3059 struct ice_adapter *ad =
3060 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3062 struct ice_rx_queue *rxq;
3064 int rx_check_ret = -1;
3066 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3067 ad->rx_use_avx512 = false;
3068 ad->rx_use_avx2 = false;
3069 rx_check_ret = ice_rx_vec_dev_check(dev);
3070 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3071 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3072 ad->rx_vec_allowed = true;
3073 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3074 rxq = dev->data->rx_queues[i];
3075 if (rxq && ice_rxq_vec_setup(rxq)) {
3076 ad->rx_vec_allowed = false;
3081 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3082 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3083 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3084 #ifdef CC_AVX512_SUPPORT
3085 ad->rx_use_avx512 = true;
3088 "AVX512 is not supported in build env");
3090 if (!ad->rx_use_avx512 &&
3091 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3092 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3093 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3094 ad->rx_use_avx2 = true;
3097 ad->rx_vec_allowed = false;
3101 if (ad->rx_vec_allowed) {
3102 if (dev->data->scattered_rx) {
3103 if (ad->rx_use_avx512) {
3104 #ifdef CC_AVX512_SUPPORT
3105 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3107 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3108 dev->data->port_id);
3110 ice_recv_scattered_pkts_vec_avx512_offload;
3113 "Using AVX512 Vector Scattered Rx (port %d).",
3114 dev->data->port_id);
3116 ice_recv_scattered_pkts_vec_avx512;
3119 } else if (ad->rx_use_avx2) {
3120 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3122 "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3123 dev->data->port_id);
3125 ice_recv_scattered_pkts_vec_avx2_offload;
3128 "Using AVX2 Vector Scattered Rx (port %d).",
3129 dev->data->port_id);
3131 ice_recv_scattered_pkts_vec_avx2;
3135 "Using Vector Scattered Rx (port %d).",
3136 dev->data->port_id);
3137 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3140 if (ad->rx_use_avx512) {
3141 #ifdef CC_AVX512_SUPPORT
3142 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3144 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3145 dev->data->port_id);
3147 ice_recv_pkts_vec_avx512_offload;
3150 "Using AVX512 Vector Rx (port %d).",
3151 dev->data->port_id);
3153 ice_recv_pkts_vec_avx512;
3156 } else if (ad->rx_use_avx2) {
3157 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3159 "Using AVX2 OFFLOAD Vector Rx (port %d).",
3160 dev->data->port_id);
3162 ice_recv_pkts_vec_avx2_offload;
3165 "Using AVX2 Vector Rx (port %d).",
3166 dev->data->port_id);
3168 ice_recv_pkts_vec_avx2;
3172 "Using Vector Rx (port %d).",
3173 dev->data->port_id);
3174 dev->rx_pkt_burst = ice_recv_pkts_vec;
3182 if (dev->data->scattered_rx) {
3183 /* Set the non-LRO scattered function */
3185 "Using a Scattered function on port %d.",
3186 dev->data->port_id);
3187 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3188 } else if (ad->rx_bulk_alloc_allowed) {
3190 "Rx Burst Bulk Alloc Preconditions are "
3191 "satisfied. Rx Burst Bulk Alloc function "
3192 "will be used on port %d.",
3193 dev->data->port_id);
3194 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3197 "Rx Burst Bulk Alloc Preconditions are not "
3198 "satisfied, Normal Rx will be used on port %d.",
3199 dev->data->port_id);
3200 dev->rx_pkt_burst = ice_recv_pkts;
3204 static const struct {
3205 eth_rx_burst_t pkt_burst;
3207 } ice_rx_burst_infos[] = {
3208 { ice_recv_scattered_pkts, "Scalar Scattered" },
3209 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3210 { ice_recv_pkts, "Scalar" },
3212 #ifdef CC_AVX512_SUPPORT
3213 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3214 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3215 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3216 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3218 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3219 { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3220 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3221 { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
3222 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3223 { ice_recv_pkts_vec, "Vector SSE" },
3228 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3229 struct rte_eth_burst_mode *mode)
3231 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3235 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3236 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3237 snprintf(mode->info, sizeof(mode->info), "%s",
3238 ice_rx_burst_infos[i].info);
3248 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3250 struct ice_adapter *ad =
3251 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3253 /* Use a simple Tx queue if possible (only fast free is allowed) */
3254 ad->tx_simple_allowed =
3256 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3257 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3259 if (ad->tx_simple_allowed)
3260 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3264 "Simple Tx can NOT be enabled on Tx queue %u.",
3268 /*********************************************************************
3272 **********************************************************************/
3273 /* The supported TSO MSS range and the maximum TSO frame size */
3274 #define ICE_MIN_TSO_MSS 64
3275 #define ICE_MAX_TSO_MSS 9728
3276 #define ICE_MAX_TSO_FRAME_SIZE 262144
3278 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3285 for (i = 0; i < nb_pkts; i++) {
3287 ol_flags = m->ol_flags;
3289 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
3290 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3291 m->tso_segsz > ICE_MAX_TSO_MSS ||
3292 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3294 * An MSS outside this range is considered malicious
3300 #ifdef RTE_ETHDEV_DEBUG_TX
3301 ret = rte_validate_tx_offload(m);
3307 ret = rte_net_intel_cksum_prepare(m);
3317 ice_set_tx_function(struct rte_eth_dev *dev)
3319 struct ice_adapter *ad =
3320 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3322 struct ice_tx_queue *txq;
3324 int tx_check_ret = -1;
3326 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3327 ad->tx_use_avx2 = false;
3328 ad->tx_use_avx512 = false;
3329 tx_check_ret = ice_tx_vec_dev_check(dev);
3330 if (tx_check_ret >= 0 &&
3331 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3332 ad->tx_vec_allowed = true;
3334 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3335 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3336 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3337 #ifdef CC_AVX512_SUPPORT
3338 ad->tx_use_avx512 = true;
3341 "AVX512 is not supported in build env");
3343 if (!ad->tx_use_avx512 &&
3344 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3345 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3346 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3347 ad->tx_use_avx2 = true;
3349 if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3350 tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3351 ad->tx_vec_allowed = false;
3353 if (ad->tx_vec_allowed) {
3354 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3355 txq = dev->data->tx_queues[i];
3356 if (txq && ice_txq_vec_setup(txq)) {
3357 ad->tx_vec_allowed = false;
3363 ad->tx_vec_allowed = false;
3367 if (ad->tx_vec_allowed) {
3368 dev->tx_pkt_prepare = NULL;
3369 if (ad->tx_use_avx512) {
3370 #ifdef CC_AVX512_SUPPORT
3371 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3373 "Using AVX512 OFFLOAD Vector Tx (port %d).",
3374 dev->data->port_id);
3376 ice_xmit_pkts_vec_avx512_offload;
3377 dev->tx_pkt_prepare = ice_prep_pkts;
3380 "Using AVX512 Vector Tx (port %d).",
3381 dev->data->port_id);
3382 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3386 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3388 "Using AVX2 OFFLOAD Vector Tx (port %d).",
3389 dev->data->port_id);
3391 ice_xmit_pkts_vec_avx2_offload;
3392 dev->tx_pkt_prepare = ice_prep_pkts;
3394 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3395 ad->tx_use_avx2 ? "avx2 " : "",
3396 dev->data->port_id);
3397 dev->tx_pkt_burst = ad->tx_use_avx2 ?
3398 ice_xmit_pkts_vec_avx2 :
3407 if (ad->tx_simple_allowed) {
3408 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3409 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3410 dev->tx_pkt_prepare = NULL;
3412 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3413 dev->tx_pkt_burst = ice_xmit_pkts;
3414 dev->tx_pkt_prepare = ice_prep_pkts;
3418 static const struct {
3419 eth_tx_burst_t pkt_burst;
3421 } ice_tx_burst_infos[] = {
3422 { ice_xmit_pkts_simple, "Scalar Simple" },
3423 { ice_xmit_pkts, "Scalar" },
3425 #ifdef CC_AVX512_SUPPORT
3426 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3427 { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3429 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3430 { ice_xmit_pkts_vec, "Vector SSE" },
3435 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3436 struct rte_eth_burst_mode *mode)
3438 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3442 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3443 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3444 snprintf(mode->info, sizeof(mode->info), "%s",
3445 ice_tx_burst_infos[i].info);
3454 /* Refer to the hardware datasheet for the meaning of each value.
3456 * @note: fix ice_dev_supported_ptypes_get() if anything changes here.
3458 static inline uint32_t
3459 ice_get_default_pkt_type(uint16_t ptype)
3461 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3462 __rte_cache_aligned = {
3465 [1] = RTE_PTYPE_L2_ETHER,
3466 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3467 /* [3] - [5] reserved */
3468 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3469 /* [7] - [10] reserved */
3470 [11] = RTE_PTYPE_L2_ETHER_ARP,
3471 /* [12] - [21] reserved */
3473 /* Non tunneled IPv4 */
3474 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3476 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3477 RTE_PTYPE_L4_NONFRAG,
3478 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3481 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3483 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3485 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3489 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3490 RTE_PTYPE_TUNNEL_IP |
3491 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3492 RTE_PTYPE_INNER_L4_FRAG,
3493 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3494 RTE_PTYPE_TUNNEL_IP |
3495 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3496 RTE_PTYPE_INNER_L4_NONFRAG,
3497 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3498 RTE_PTYPE_TUNNEL_IP |
3499 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3500 RTE_PTYPE_INNER_L4_UDP,
3502 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3503 RTE_PTYPE_TUNNEL_IP |
3504 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3505 RTE_PTYPE_INNER_L4_TCP,
3506 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3507 RTE_PTYPE_TUNNEL_IP |
3508 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3509 RTE_PTYPE_INNER_L4_SCTP,
3510 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3511 RTE_PTYPE_TUNNEL_IP |
3512 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3513 RTE_PTYPE_INNER_L4_ICMP,
3516 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3517 RTE_PTYPE_TUNNEL_IP |
3518 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3519 RTE_PTYPE_INNER_L4_FRAG,
3520 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3521 RTE_PTYPE_TUNNEL_IP |
3522 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3523 RTE_PTYPE_INNER_L4_NONFRAG,
3524 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3525 RTE_PTYPE_TUNNEL_IP |
3526 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3527 RTE_PTYPE_INNER_L4_UDP,
3529 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3530 RTE_PTYPE_TUNNEL_IP |
3531 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3532 RTE_PTYPE_INNER_L4_TCP,
3533 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3534 RTE_PTYPE_TUNNEL_IP |
3535 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3536 RTE_PTYPE_INNER_L4_SCTP,
3537 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3538 RTE_PTYPE_TUNNEL_IP |
3539 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3540 RTE_PTYPE_INNER_L4_ICMP,
3542 /* IPv4 --> GRE/Teredo/VXLAN */
3543 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3544 RTE_PTYPE_TUNNEL_GRENAT,
3546 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3547 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3548 RTE_PTYPE_TUNNEL_GRENAT |
3549 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3550 RTE_PTYPE_INNER_L4_FRAG,
3551 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3552 RTE_PTYPE_TUNNEL_GRENAT |
3553 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3554 RTE_PTYPE_INNER_L4_NONFRAG,
3555 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3556 RTE_PTYPE_TUNNEL_GRENAT |
3557 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3558 RTE_PTYPE_INNER_L4_UDP,
3560 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3561 RTE_PTYPE_TUNNEL_GRENAT |
3562 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3563 RTE_PTYPE_INNER_L4_TCP,
3564 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3565 RTE_PTYPE_TUNNEL_GRENAT |
3566 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3567 RTE_PTYPE_INNER_L4_SCTP,
3568 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3569 RTE_PTYPE_TUNNEL_GRENAT |
3570 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3571 RTE_PTYPE_INNER_L4_ICMP,
3573 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3574 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3575 RTE_PTYPE_TUNNEL_GRENAT |
3576 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3577 RTE_PTYPE_INNER_L4_FRAG,
3578 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3579 RTE_PTYPE_TUNNEL_GRENAT |
3580 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3581 RTE_PTYPE_INNER_L4_NONFRAG,
3582 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3583 RTE_PTYPE_TUNNEL_GRENAT |
3584 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3585 RTE_PTYPE_INNER_L4_UDP,
3587 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3588 RTE_PTYPE_TUNNEL_GRENAT |
3589 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3590 RTE_PTYPE_INNER_L4_TCP,
3591 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3592 RTE_PTYPE_TUNNEL_GRENAT |
3593 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3594 RTE_PTYPE_INNER_L4_SCTP,
3595 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3596 RTE_PTYPE_TUNNEL_GRENAT |
3597 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3598 RTE_PTYPE_INNER_L4_ICMP,
3600 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3601 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3602 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3604 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3605 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3606 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3607 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3608 RTE_PTYPE_INNER_L4_FRAG,
3609 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3610 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3611 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3612 RTE_PTYPE_INNER_L4_NONFRAG,
3613 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3614 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3615 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3616 RTE_PTYPE_INNER_L4_UDP,
3618 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3619 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3620 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3621 RTE_PTYPE_INNER_L4_TCP,
3622 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3623 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3624 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3625 RTE_PTYPE_INNER_L4_SCTP,
3626 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3627 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3628 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3629 RTE_PTYPE_INNER_L4_ICMP,
3631 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3632 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3633 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3634 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3635 RTE_PTYPE_INNER_L4_FRAG,
3636 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3637 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3638 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3639 RTE_PTYPE_INNER_L4_NONFRAG,
3640 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3641 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3642 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3643 RTE_PTYPE_INNER_L4_UDP,
3645 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3646 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3647 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3648 RTE_PTYPE_INNER_L4_TCP,
3649 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3650 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3651 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3652 RTE_PTYPE_INNER_L4_SCTP,
3653 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3654 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3655 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3656 RTE_PTYPE_INNER_L4_ICMP,
3657 /* [73] - [87] reserved */
3659 /* Non tunneled IPv6 */
3660 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3662 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3663 RTE_PTYPE_L4_NONFRAG,
3664 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3667 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3669 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3671 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3675 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3676 RTE_PTYPE_TUNNEL_IP |
3677 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3678 RTE_PTYPE_INNER_L4_FRAG,
3679 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3680 RTE_PTYPE_TUNNEL_IP |
3681 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3682 RTE_PTYPE_INNER_L4_NONFRAG,
3683 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3684 RTE_PTYPE_TUNNEL_IP |
3685 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3686 RTE_PTYPE_INNER_L4_UDP,
3688 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3689 RTE_PTYPE_TUNNEL_IP |
3690 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3691 RTE_PTYPE_INNER_L4_TCP,
3692 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3693 RTE_PTYPE_TUNNEL_IP |
3694 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3695 RTE_PTYPE_INNER_L4_SCTP,
3696 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3697 RTE_PTYPE_TUNNEL_IP |
3698 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3699 RTE_PTYPE_INNER_L4_ICMP,
3702 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3703 RTE_PTYPE_TUNNEL_IP |
3704 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3705 RTE_PTYPE_INNER_L4_FRAG,
3706 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3707 RTE_PTYPE_TUNNEL_IP |
3708 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3709 RTE_PTYPE_INNER_L4_NONFRAG,
3710 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3711 RTE_PTYPE_TUNNEL_IP |
3712 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3713 RTE_PTYPE_INNER_L4_UDP,
3714 /* [105] reserved */
3715 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3716 RTE_PTYPE_TUNNEL_IP |
3717 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3718 RTE_PTYPE_INNER_L4_TCP,
3719 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3720 RTE_PTYPE_TUNNEL_IP |
3721 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3722 RTE_PTYPE_INNER_L4_SCTP,
3723 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3724 RTE_PTYPE_TUNNEL_IP |
3725 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3726 RTE_PTYPE_INNER_L4_ICMP,
3728 /* IPv6 --> GRE/Teredo/VXLAN */
3729 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3730 RTE_PTYPE_TUNNEL_GRENAT,
3732 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3733 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3734 RTE_PTYPE_TUNNEL_GRENAT |
3735 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3736 RTE_PTYPE_INNER_L4_FRAG,
3737 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3738 RTE_PTYPE_TUNNEL_GRENAT |
3739 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3740 RTE_PTYPE_INNER_L4_NONFRAG,
3741 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3742 RTE_PTYPE_TUNNEL_GRENAT |
3743 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3744 RTE_PTYPE_INNER_L4_UDP,
3745 /* [113] reserved */
3746 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3747 RTE_PTYPE_TUNNEL_GRENAT |
3748 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3749 RTE_PTYPE_INNER_L4_TCP,
3750 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3751 RTE_PTYPE_TUNNEL_GRENAT |
3752 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3753 RTE_PTYPE_INNER_L4_SCTP,
3754 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3755 RTE_PTYPE_TUNNEL_GRENAT |
3756 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3757 RTE_PTYPE_INNER_L4_ICMP,
3759 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3760 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3761 RTE_PTYPE_TUNNEL_GRENAT |
3762 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3763 RTE_PTYPE_INNER_L4_FRAG,
3764 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3765 RTE_PTYPE_TUNNEL_GRENAT |
3766 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3767 RTE_PTYPE_INNER_L4_NONFRAG,
3768 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3769 RTE_PTYPE_TUNNEL_GRENAT |
3770 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3771 RTE_PTYPE_INNER_L4_UDP,
3772 /* [120] reserved */
3773 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3774 RTE_PTYPE_TUNNEL_GRENAT |
3775 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3776 RTE_PTYPE_INNER_L4_TCP,
3777 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3778 RTE_PTYPE_TUNNEL_GRENAT |
3779 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3780 RTE_PTYPE_INNER_L4_SCTP,
3781 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3782 RTE_PTYPE_TUNNEL_GRENAT |
3783 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3784 RTE_PTYPE_INNER_L4_ICMP,
3786 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3787 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3788 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3790 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3791 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3792 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3793 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3794 RTE_PTYPE_INNER_L4_FRAG,
3795 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3796 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3797 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3798 RTE_PTYPE_INNER_L4_NONFRAG,
3799 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3800 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3801 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3802 RTE_PTYPE_INNER_L4_UDP,
3803 /* [128] reserved */
3804 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3805 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3806 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3807 RTE_PTYPE_INNER_L4_TCP,
3808 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3809 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3810 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3811 RTE_PTYPE_INNER_L4_SCTP,
3812 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3813 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3814 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3815 RTE_PTYPE_INNER_L4_ICMP,
3817 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3818 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3819 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3820 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3821 RTE_PTYPE_INNER_L4_FRAG,
3822 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3823 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3824 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3825 RTE_PTYPE_INNER_L4_NONFRAG,
3826 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3827 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3828 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3829 RTE_PTYPE_INNER_L4_UDP,
3830 /* [135] reserved */
3831 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3832 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3833 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3834 RTE_PTYPE_INNER_L4_TCP,
3835 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3836 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3837 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3838 RTE_PTYPE_INNER_L4_SCTP,
3839 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3840 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3841 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3842 RTE_PTYPE_INNER_L4_ICMP,
3843 /* [139] - [299] reserved */
3846 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3847 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3849 /* PPPoE --> IPv4 */
3850 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3851 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3853 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3854 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3855 RTE_PTYPE_L4_NONFRAG,
3856 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3857 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3859 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3860 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3862 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3863 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3865 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3866 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3869 /* PPPoE --> IPv6 */
3870 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3871 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3873 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3874 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3875 RTE_PTYPE_L4_NONFRAG,
3876 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3877 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3879 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3880 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3882 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3883 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3885 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3886 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3888 /* [314] - [324] reserved */
3890 /* IPv4/IPv6 --> GTPC/GTPU */
3891 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3892 RTE_PTYPE_TUNNEL_GTPC,
3893 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3894 RTE_PTYPE_TUNNEL_GTPC,
3895 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3896 RTE_PTYPE_TUNNEL_GTPC,
3897 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3898 RTE_PTYPE_TUNNEL_GTPC,
3899 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3900 RTE_PTYPE_TUNNEL_GTPU,
3901 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3902 RTE_PTYPE_TUNNEL_GTPU,
3904 /* IPv4 --> GTPU --> IPv4 */
3905 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3906 RTE_PTYPE_TUNNEL_GTPU |
3907 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3908 RTE_PTYPE_INNER_L4_FRAG,
3909 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3910 RTE_PTYPE_TUNNEL_GTPU |
3911 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3912 RTE_PTYPE_INNER_L4_NONFRAG,
3913 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3914 RTE_PTYPE_TUNNEL_GTPU |
3915 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3916 RTE_PTYPE_INNER_L4_UDP,
3917 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3918 RTE_PTYPE_TUNNEL_GTPU |
3919 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3920 RTE_PTYPE_INNER_L4_TCP,
3921 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3922 RTE_PTYPE_TUNNEL_GTPU |
3923 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3924 RTE_PTYPE_INNER_L4_ICMP,
3926 /* IPv6 --> GTPU --> IPv4 */
3927 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3928 RTE_PTYPE_TUNNEL_GTPU |
3929 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3930 RTE_PTYPE_INNER_L4_FRAG,
3931 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3932 RTE_PTYPE_TUNNEL_GTPU |
3933 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3934 RTE_PTYPE_INNER_L4_NONFRAG,
3935 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3936 RTE_PTYPE_TUNNEL_GTPU |
3937 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3938 RTE_PTYPE_INNER_L4_UDP,
3939 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3940 RTE_PTYPE_TUNNEL_GTPU |
3941 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3942 RTE_PTYPE_INNER_L4_TCP,
3943 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3944 RTE_PTYPE_TUNNEL_GTPU |
3945 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3946 RTE_PTYPE_INNER_L4_ICMP,
3948 /* IPv4 --> GTPU --> IPv6 */
3949 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3950 RTE_PTYPE_TUNNEL_GTPU |
3951 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3952 RTE_PTYPE_INNER_L4_FRAG,
3953 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3954 RTE_PTYPE_TUNNEL_GTPU |
3955 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3956 RTE_PTYPE_INNER_L4_NONFRAG,
3957 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3958 RTE_PTYPE_TUNNEL_GTPU |
3959 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3960 RTE_PTYPE_INNER_L4_UDP,
3961 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3962 RTE_PTYPE_TUNNEL_GTPU |
3963 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3964 RTE_PTYPE_INNER_L4_TCP,
3965 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3966 RTE_PTYPE_TUNNEL_GTPU |
3967 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3968 RTE_PTYPE_INNER_L4_ICMP,
3970 /* IPv6 --> GTPU --> IPv6 */
3971 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3972 RTE_PTYPE_TUNNEL_GTPU |
3973 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3974 RTE_PTYPE_INNER_L4_FRAG,
3975 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3976 RTE_PTYPE_TUNNEL_GTPU |
3977 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3978 RTE_PTYPE_INNER_L4_NONFRAG,
3979 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3980 RTE_PTYPE_TUNNEL_GTPU |
3981 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3982 RTE_PTYPE_INNER_L4_UDP,
3983 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3984 RTE_PTYPE_TUNNEL_GTPU |
3985 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3986 RTE_PTYPE_INNER_L4_TCP,
3987 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3988 RTE_PTYPE_TUNNEL_GTPU |
3989 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3990 RTE_PTYPE_INNER_L4_ICMP,
3992 /* IPv4 --> UDP ECPRI */
3993 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3995 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3997 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3999 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4001 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4003 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4005 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4007 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4009 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4011 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4014 /* IPv6 --> UDP ECPRI */
4015 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4017 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4019 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4021 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4023 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4025 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4027 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4029 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4031 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4033 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4035 /* All others reserved */
4038 return type_table[ptype];
4042 ice_set_default_ptype_table(struct rte_eth_dev *dev)
4044 struct ice_adapter *ad =
4045 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4048 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4049 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4052 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
4053 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
4054 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4055 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4056 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4058 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
4059 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
4060 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4061 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
4062 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
4063 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4066 * Check the programming status descriptor in the Rx queue.
4067 * This is done after a Flow Director rule has been programmed on the
4071 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4073 volatile union ice_32byte_rx_desc *rxdp;
4080 rxdp = (volatile union ice_32byte_rx_desc *)
4081 (&rxq->rx_ring[rxq->rx_tail]);
4082 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4083 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4084 >> ICE_RXD_QW1_STATUS_S;
4086 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4088 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4089 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4090 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4091 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4093 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4094 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4095 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4096 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4100 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4101 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4103 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4107 rxdp->wb.qword1.status_error_len = 0;
4109 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4111 if (rxq->rx_tail == 0)
4112 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4114 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4120 #define ICE_FDIR_MAX_WAIT_US 10000
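/*
 * The programming routine below polls for the DD bit for at most
 * ICE_FDIR_MAX_WAIT_US iterations; assuming a 1 us delay per iteration
 * (not shown here), this bounds the wait to roughly 10 ms.
 */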
4123 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4125 struct ice_tx_queue *txq = pf->fdir.txq;
4126 struct ice_rx_queue *rxq = pf->fdir.rxq;
4127 volatile struct ice_fltr_desc *fdirdp;
4128 volatile struct ice_tx_desc *txdp;
4132 fdirdp = (volatile struct ice_fltr_desc *)
4133 (&txq->tx_ring[txq->tx_tail]);
4134 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4135 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4137 txdp = &txq->tx_ring[txq->tx_tail + 1];
4138 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4139 td_cmd = ICE_TX_DESC_CMD_EOP |
4140 ICE_TX_DESC_CMD_RS |
4141 ICE_TX_DESC_CMD_DUMMY;
4143 txdp->cmd_type_offset_bsz =
4144 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4147 if (txq->tx_tail >= txq->nb_tx_desc)
4149 /* Update the tx tail register */
4150 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4151 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4152 if ((txdp->cmd_type_offset_bsz &
4153 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4154 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4158 if (i >= ICE_FDIR_MAX_WAIT_US) {
4160 "Failed to program FDIR filter: time out to get DD on tx queue.");
4164 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4167 ret = ice_check_fdir_programming_status(rxq);
4175 "Failed to program FDIR filter: programming status reported.");