1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <ethdev_driver.h>
9 #include "rte_pmd_ice.h"
11 #include "ice_rxtx_vec_common.h"
13 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
17 PKT_TX_OUTER_IP_CKSUM)
19 /* Offset of mbuf dynamic field for protocol extraction data */
20 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
22 /* Mask of mbuf dynamic flags for protocol extraction type */
23 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
28 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
31 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
33 volatile union ice_rx_flex_desc *rxdp;
34 struct ice_rx_queue *rxq = rx_queue;
38 rxdp = &rxq->rx_ring[desc];
39 /* watch for changes in status bit */
40 pmc->addr = &rxdp->wb.status_error0;
43 * we expect the DD bit to be set to 1 if this descriptor was already written to.
46 pmc->val = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
47 pmc->mask = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
49 /* register is 16-bit */
50 pmc->size = sizeof(uint16_t);
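/* Usage sketch (illustrative, not part of this file): applications
 * normally reach this callback through the ethdev API, e.g.
 *
 *   struct rte_power_monitor_cond pmc;
 *   rte_eth_get_monitor_addr(port_id, queue_id, &pmc);
 *   rte_power_monitor(&pmc, rte_rdtsc() + timeout_cycles);
 *
 * which blocks the core until the DD bit of the watched descriptor flips.
 */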
57 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
59 static uint8_t rxdid_map[] = {
60 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
61 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
62 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
63 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
64 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
65 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
66 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
69 return xtr_type < RTE_DIM(rxdid_map) ?
70 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
74 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
76 volatile union ice_rx_flex_desc *rxdp)
78 volatile struct ice_32b_rx_flex_desc_comms *desc =
79 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
80 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
82 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
83 mb->ol_flags |= PKT_RX_RSS_HASH;
84 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
87 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
88 if (desc->flow_id != 0xFFFFFFFF) {
89 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
90 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
96 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
98 volatile union ice_rx_flex_desc *rxdp)
100 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
101 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
102 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
106 if (desc->flow_id != 0xFFFFFFFF) {
107 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
108 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
111 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
112 stat_err = rte_le_to_cpu_16(desc->status_error0);
113 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
114 mb->ol_flags |= PKT_RX_RSS_HASH;
115 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
121 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
123 volatile union ice_rx_flex_desc *rxdp)
125 volatile struct ice_32b_rx_flex_desc_comms *desc =
126 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
129 stat_err = rte_le_to_cpu_16(desc->status_error0);
130 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
131 mb->ol_flags |= PKT_RX_RSS_HASH;
132 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
135 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
136 if (desc->flow_id != 0xFFFFFFFF) {
137 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
138 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
141 if (rxq->xtr_ol_flag) {
142 uint32_t metadata = 0;
144 stat_err = rte_le_to_cpu_16(desc->status_error1);
146 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
147 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
149 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
151 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
154 mb->ol_flags |= rxq->xtr_ol_flag;
156 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
163 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
165 volatile union ice_rx_flex_desc *rxdp)
167 volatile struct ice_32b_rx_flex_desc_comms *desc =
168 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
171 stat_err = rte_le_to_cpu_16(desc->status_error0);
172 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
173 mb->ol_flags |= PKT_RX_RSS_HASH;
174 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
177 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
178 if (desc->flow_id != 0xFFFFFFFF) {
179 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
180 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
183 if (rxq->xtr_ol_flag) {
184 uint32_t metadata = 0;
186 if (desc->flex_ts.flex.aux0 != 0xFFFF)
187 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
188 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
189 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
192 mb->ol_flags |= rxq->xtr_ol_flag;
194 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
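/* Select the callback that translates flexible Rx descriptor fields into
 * mbuf fields, based on the RXDID the queue is programmed with.
 */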
201 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
204 case ICE_RXDID_COMMS_AUX_VLAN:
205 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
206 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
209 case ICE_RXDID_COMMS_AUX_IPV4:
210 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
211 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
214 case ICE_RXDID_COMMS_AUX_IPV6:
215 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
216 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
219 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
220 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
221 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
224 case ICE_RXDID_COMMS_AUX_TCP:
225 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
226 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
229 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
230 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
231 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
234 case ICE_RXDID_COMMS_GENERIC:
235 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
238 case ICE_RXDID_COMMS_OVS:
239 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
243 /* update this according to the RXDID for PROTO_XTR_NONE */
244 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
248 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
249 rxq->xtr_ol_flag = 0;
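/* Program the Rx queue context in hardware: ring base and size, buffer
 * length, maximum packet length and the descriptor format (RXDID).
 */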
252 static enum ice_status
253 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
255 struct ice_vsi *vsi = rxq->vsi;
256 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
257 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
258 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
259 struct ice_rlan_ctx rx_ctx;
261 uint16_t buf_size, len;
262 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
263 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
266 /* Set buffer size as the head split is disabled. */
267 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
268 RTE_PKTMBUF_HEADROOM);
270 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
271 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
272 rxq->max_pkt_len = RTE_MIN(len,
273 dev->data->dev_conf.rxmode.max_rx_pkt_len);
275 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
276 if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
277 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
278 PMD_DRV_LOG(ERR, "maximum packet length must "
279 "be larger than %u and smaller than %u,"
280 "as jumbo frame is enabled",
281 (uint32_t)ICE_ETH_MAX_LEN,
282 (uint32_t)ICE_FRAME_SIZE_MAX);
286 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
287 rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
288 PMD_DRV_LOG(ERR, "maximum packet length must be "
289 "larger than %u and smaller than %u, "
290 "as jumbo frame is disabled",
291 (uint32_t)RTE_ETHER_MIN_LEN,
292 (uint32_t)ICE_ETH_MAX_LEN);
297 memset(&rx_ctx, 0, sizeof(rx_ctx));
299 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
300 rx_ctx.qlen = rxq->nb_rx_desc;
301 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
302 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
303 rx_ctx.dtype = 0; /* No Header Split mode */
304 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
305 rx_ctx.dsize = 1; /* 32B descriptors */
307 rx_ctx.rxmax = rxq->max_pkt_len;
308 /* TPH: Transaction Layer Packet (TLP) processing hints */
309 rx_ctx.tphrdesc_ena = 1;
310 rx_ctx.tphwdesc_ena = 1;
311 rx_ctx.tphdata_ena = 1;
312 rx_ctx.tphhead_ena = 1;
313 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
314 * When the number of free descriptors goes below the lrxqthresh,
315 * an immediate interrupt is triggered.
317 rx_ctx.lrxqthresh = 2;
318 /* Default: 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
321 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
323 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
325 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
326 rxq->port_id, rxq->queue_id, rxdid);
328 if (!(pf->supported_rxdid & BIT(rxdid))) {
329 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
334 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
336 /* Enable Flexible Descriptors in the queue context which
337 * allows this driver to select a specific receive descriptor format
339 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
340 QRXFLXP_CNTXT_RXDID_IDX_M;
342 /* increasing context priority to pick up profile ID;
343 * default is 0x01; setting it to 0x03 ensures the profile
344 * is programmed even if the previous context had the same priority
346 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
347 QRXFLXP_CNTXT_RXDID_PRIO_M;
349 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
351 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
353 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
357 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
359 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
364 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
365 RTE_PKTMBUF_HEADROOM);
367 /* Check if scattered RX needs to be used. */
368 if (rxq->max_pkt_len > buf_size)
369 dev->data->scattered_rx = 1;
371 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
373 /* Init the Rx tail register */
374 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
379 /* Allocate mbufs for all descriptors in rx queue */
381 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
383 struct ice_rx_entry *rxe = rxq->sw_ring;
387 for (i = 0; i < rxq->nb_rx_desc; i++) {
388 volatile union ice_rx_flex_desc *rxd;
389 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
391 if (unlikely(!mbuf)) {
392 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
396 rte_mbuf_refcnt_set(mbuf, 1);
398 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
400 mbuf->port = rxq->port_id;
403 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
405 rxd = &rxq->rx_ring[i];
406 rxd->read.pkt_addr = dma_addr;
407 rxd->read.hdr_addr = 0;
408 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
418 /* Free all mbufs for descriptors in rx queue */
420 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
424 if (!rxq || !rxq->sw_ring) {
425 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
429 for (i = 0; i < rxq->nb_rx_desc; i++) {
430 if (rxq->sw_ring[i].mbuf) {
431 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
432 rxq->sw_ring[i].mbuf = NULL;
435 if (rxq->rx_nb_avail == 0)
437 for (i = 0; i < rxq->rx_nb_avail; i++)
438 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
440 rxq->rx_nb_avail = 0;
443 /* turn on or off rx queue
444 * @q_idx: queue index in pf scope
445 * @on: turn on or off the queue
448 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
453 /* QRX_CTRL = QRX_ENA */
454 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
457 if (reg & QRX_CTRL_QENA_STAT_M)
458 return 0; /* Already on, skip */
459 reg |= QRX_CTRL_QENA_REQ_M;
461 if (!(reg & QRX_CTRL_QENA_STAT_M))
462 return 0; /* Already off, skip */
463 reg &= ~QRX_CTRL_QENA_REQ_M;
466 /* Write the register */
467 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
468 /* Check the result: QENA_STAT is expected to follow
469 * QENA_REQ within 10 us.
470 * TODO: revisit the wait counter later
472 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
473 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
474 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
476 if ((reg & QRX_CTRL_QENA_REQ_M) &&
477 (reg & QRX_CTRL_QENA_STAT_M))
480 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
481 !(reg & QRX_CTRL_QENA_STAT_M))
486 /* Check if it is timeout */
487 if (j >= ICE_CHK_Q_ENA_COUNT) {
488 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
489 (on ? "enable" : "disable"), q_idx);
497 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
501 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
502 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
503 "rxq->rx_free_thresh=%d, "
504 "ICE_RX_MAX_BURST=%d",
505 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
507 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
508 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
509 "rxq->rx_free_thresh=%d, "
510 "rxq->nb_rx_desc=%d",
511 rxq->rx_free_thresh, rxq->nb_rx_desc);
513 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
514 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
515 "rxq->nb_rx_desc=%d, "
516 "rxq->rx_free_thresh=%d",
517 rxq->nb_rx_desc, rxq->rx_free_thresh);
524 /* reset fields in ice_rx_queue back to default */
526 ice_reset_rx_queue(struct ice_rx_queue *rxq)
532 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
536 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
538 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
539 ((volatile char *)rxq->rx_ring)[i] = 0;
541 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
542 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
543 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
545 rxq->rx_nb_avail = 0;
546 rxq->rx_next_avail = 0;
547 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
551 rxq->pkt_first_seg = NULL;
552 rxq->pkt_last_seg = NULL;
554 rxq->rxrearm_start = 0;
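/* Start an Rx queue: program its hardware context, fill the ring with
 * mbufs, set the tail register and enable the queue.
 */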
559 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
561 struct ice_rx_queue *rxq;
563 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
565 PMD_INIT_FUNC_TRACE();
567 if (rx_queue_id >= dev->data->nb_rx_queues) {
568 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
569 rx_queue_id, dev->data->nb_rx_queues);
573 rxq = dev->data->rx_queues[rx_queue_id];
574 if (!rxq || !rxq->q_set) {
575 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
580 err = ice_program_hw_rx_queue(rxq);
582 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
587 err = ice_alloc_rx_queue_mbufs(rxq);
589 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
593 /* Init the RX tail register. */
594 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
596 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
598 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
601 rxq->rx_rel_mbufs(rxq);
602 ice_reset_rx_queue(rxq);
606 dev->data->rx_queue_state[rx_queue_id] =
607 RTE_ETH_QUEUE_STATE_STARTED;
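/* Stop an Rx queue: disable it in hardware, free its mbufs and reset it. */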
613 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
615 struct ice_rx_queue *rxq;
617 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
619 if (rx_queue_id < dev->data->nb_rx_queues) {
620 rxq = dev->data->rx_queues[rx_queue_id];
622 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
624 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
628 rxq->rx_rel_mbufs(rxq);
629 ice_reset_rx_queue(rxq);
630 dev->data->rx_queue_state[rx_queue_id] =
631 RTE_ETH_QUEUE_STATE_STOPPED;
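/* Start a Tx queue: build its context, hand it to firmware through
 * ice_ena_vsi_txq() and record the returned scheduler node ID.
 */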
638 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
640 struct ice_tx_queue *txq;
644 struct ice_aqc_add_tx_qgrp *txq_elem;
645 struct ice_tlan_ctx tx_ctx;
648 PMD_INIT_FUNC_TRACE();
650 if (tx_queue_id >= dev->data->nb_tx_queues) {
651 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
652 tx_queue_id, dev->data->nb_tx_queues);
656 txq = dev->data->tx_queues[tx_queue_id];
657 if (!txq || !txq->q_set) {
658 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
663 buf_len = ice_struct_size(txq_elem, txqs, 1);
664 txq_elem = ice_malloc(hw, buf_len);
669 hw = ICE_VSI_TO_HW(vsi);
671 memset(&tx_ctx, 0, sizeof(tx_ctx));
672 txq_elem->num_txqs = 1;
673 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
675 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
676 tx_ctx.qlen = txq->nb_tx_desc;
677 tx_ctx.pf_num = hw->pf_id;
678 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
679 tx_ctx.src_vsi = vsi->vsi_id;
680 tx_ctx.port_num = hw->port_info->lport;
681 tx_ctx.tso_ena = 1; /* tso enable */
682 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
683 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
685 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
688 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
690 /* Init the Tx tail register */
691 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
693 /* FIXME: we assume TC is always 0 here */
694 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
695 txq_elem, buf_len, NULL);
697 PMD_DRV_LOG(ERR, "Failed to add lan txq");
701 /* store the schedule node id */
702 txq->q_teid = txq_elem->txqs[0].q_teid;
704 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
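/* Program the flow director Rx queue context; unlike regular queues it
 * uses the legacy descriptor format (ICE_RXDID_LEGACY_1).
 */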
710 static enum ice_status
711 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
713 struct ice_vsi *vsi = rxq->vsi;
714 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
715 uint32_t rxdid = ICE_RXDID_LEGACY_1;
716 struct ice_rlan_ctx rx_ctx;
721 rxq->rx_buf_len = 1024;
723 memset(&rx_ctx, 0, sizeof(rx_ctx));
725 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
726 rx_ctx.qlen = rxq->nb_rx_desc;
727 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
728 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
729 rx_ctx.dtype = 0; /* No Header Split mode */
730 rx_ctx.dsize = 1; /* 32B descriptors */
731 rx_ctx.rxmax = ICE_ETH_MAX_LEN;
732 /* TPH: Transaction Layer Packet (TLP) processing hints */
733 rx_ctx.tphrdesc_ena = 1;
734 rx_ctx.tphwdesc_ena = 1;
735 rx_ctx.tphdata_ena = 1;
736 rx_ctx.tphhead_ena = 1;
737 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
738 * When the number of free descriptors goes below the lrxqthresh,
739 * an immediate interrupt is triggered.
741 rx_ctx.lrxqthresh = 2;
742 /* Default: 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
745 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
747 /* Enable Flexible Descriptors in the queue context which
748 * allows this driver to select a specific receive descriptor format
750 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
751 QRXFLXP_CNTXT_RXDID_IDX_M;
753 /* increasing context priority to pick up profile ID;
754 * default is 0x01; setting it to 0x03 ensures the profile
755 * is programmed even if the previous context had the same priority
757 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
758 QRXFLXP_CNTXT_RXDID_PRIO_M;
760 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
762 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
764 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
768 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
770 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
775 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
777 /* Init the Rx tail register */
778 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
784 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
786 struct ice_rx_queue *rxq;
788 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
789 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
791 PMD_INIT_FUNC_TRACE();
794 if (!rxq || !rxq->q_set) {
795 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
800 err = ice_fdir_program_hw_rx_queue(rxq);
802 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
807 /* Init the RX tail register. */
808 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
810 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
812 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
815 ice_reset_rx_queue(rxq);
823 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
825 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
826 struct ice_tx_queue *txq;
830 struct ice_aqc_add_tx_qgrp *txq_elem;
831 struct ice_tlan_ctx tx_ctx;
834 PMD_INIT_FUNC_TRACE();
837 if (!txq || !txq->q_set) {
838 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
843 buf_len = ice_struct_size(txq_elem, txqs, 1);
844 txq_elem = ice_malloc(hw, buf_len);
849 hw = ICE_VSI_TO_HW(vsi);
851 memset(&tx_ctx, 0, sizeof(tx_ctx));
852 txq_elem->num_txqs = 1;
853 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
855 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
856 tx_ctx.qlen = txq->nb_tx_desc;
857 tx_ctx.pf_num = hw->pf_id;
858 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
859 tx_ctx.src_vsi = vsi->vsi_id;
860 tx_ctx.port_num = hw->port_info->lport;
861 tx_ctx.tso_ena = 1; /* tso enable */
862 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
863 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
865 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
868 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
870 /* Init the Tx tail register */
871 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
873 /* FIXME: we assume TC is always 0 here */
874 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
875 txq_elem, buf_len, NULL);
877 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
881 /* store the schedule node id */
882 txq->q_teid = txq_elem->txqs[0].q_teid;
888 /* Free all mbufs for descriptors in tx queue */
890 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
894 if (!txq || !txq->sw_ring) {
895 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
899 for (i = 0; i < txq->nb_tx_desc; i++) {
900 if (txq->sw_ring[i].mbuf) {
901 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
902 txq->sw_ring[i].mbuf = NULL;
908 ice_reset_tx_queue(struct ice_tx_queue *txq)
910 struct ice_tx_entry *txe;
911 uint16_t i, prev, size;
914 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
919 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
920 for (i = 0; i < size; i++)
921 ((volatile char *)txq->tx_ring)[i] = 0;
923 prev = (uint16_t)(txq->nb_tx_desc - 1);
924 for (i = 0; i < txq->nb_tx_desc; i++) {
925 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
927 txd->cmd_type_offset_bsz =
928 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
931 txe[prev].next_id = i;
935 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
936 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
941 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
942 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
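/* Stop a Tx queue: remove it from the scheduler via ice_dis_vsi_txq(),
 * free its mbufs and reset the ring.
 */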
946 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
948 struct ice_tx_queue *txq;
949 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
950 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
951 struct ice_vsi *vsi = pf->main_vsi;
952 enum ice_status status;
955 uint16_t q_handle = tx_queue_id;
957 if (tx_queue_id >= dev->data->nb_tx_queues) {
958 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
959 tx_queue_id, dev->data->nb_tx_queues);
963 txq = dev->data->tx_queues[tx_queue_id];
965 PMD_DRV_LOG(ERR, "TX queue %u is not available",
970 q_ids[0] = txq->reg_idx;
971 q_teids[0] = txq->q_teid;
973 /* FIXME: we assume TC is always 0 here */
974 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
975 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
976 if (status != ICE_SUCCESS) {
977 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
981 txq->tx_rel_mbufs(txq);
982 ice_reset_tx_queue(txq);
983 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
989 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
991 struct ice_rx_queue *rxq;
993 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
994 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
998 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1000 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1004 rxq->rx_rel_mbufs(rxq);
1010 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1012 struct ice_tx_queue *txq;
1013 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1014 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1015 struct ice_vsi *vsi = pf->main_vsi;
1016 enum ice_status status;
1018 uint32_t q_teids[1];
1019 uint16_t q_handle = tx_queue_id;
1023 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1029 q_ids[0] = txq->reg_idx;
1030 q_teids[0] = txq->q_teid;
1032 /* FIXME: we assume TC is always 0 here */
1033 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1034 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1035 if (status != ICE_SUCCESS) {
1036 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1040 txq->tx_rel_mbufs(txq);
1046 ice_rx_queue_setup(struct rte_eth_dev *dev,
1049 unsigned int socket_id,
1050 const struct rte_eth_rxconf *rx_conf,
1051 struct rte_mempool *mp)
1053 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1054 struct ice_adapter *ad =
1055 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1056 struct ice_vsi *vsi = pf->main_vsi;
1057 struct ice_rx_queue *rxq;
1058 const struct rte_memzone *rz;
1061 int use_def_burst_func = 1;
1064 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1065 nb_desc > ICE_MAX_RING_DESC ||
1066 nb_desc < ICE_MIN_RING_DESC) {
1067 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1068 "invalid", nb_desc);
1072 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1074 /* Free memory if needed */
1075 if (dev->data->rx_queues[queue_idx]) {
1076 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1077 dev->data->rx_queues[queue_idx] = NULL;
1080 /* Allocate the rx queue data structure */
1081 rxq = rte_zmalloc_socket(NULL,
1082 sizeof(struct ice_rx_queue),
1083 RTE_CACHE_LINE_SIZE,
1086 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1087 "rx queue data structure");
1091 rxq->nb_rx_desc = nb_desc;
1092 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1093 rxq->queue_id = queue_idx;
1094 rxq->offloads = offloads;
1096 rxq->reg_idx = vsi->base_queue + queue_idx;
1097 rxq->port_id = dev->data->port_id;
1098 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1099 rxq->crc_len = RTE_ETHER_CRC_LEN;
1103 rxq->drop_en = rx_conf->rx_drop_en;
1105 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1106 rxq->proto_xtr = pf->proto_xtr != NULL ?
1107 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1109 /* Allocate the maximum number of RX ring hardware descriptors. */
1110 len = ICE_MAX_RING_DESC;
1113 * Allocating a little more memory because vectorized/bulk_alloc Rx
1114 * functions don't check boundaries each time.
1116 len += ICE_RX_MAX_BURST;
1118 /* Size the DMA zone for the maximum number of RX hardware descriptors. */
1119 ring_size = sizeof(union ice_rx_flex_desc) * len;
1120 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1121 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1122 ring_size, ICE_RING_BASE_ALIGN,
1125 ice_rx_queue_release(rxq);
1126 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1130 /* Zero all the descriptors in the ring. */
1131 memset(rz->addr, 0, ring_size);
1133 rxq->rx_ring_dma = rz->iova;
1134 rxq->rx_ring = rz->addr;
1136 /* always reserve more for bulk alloc */
1137 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1139 /* Allocate the software ring. */
1140 rxq->sw_ring = rte_zmalloc_socket(NULL,
1141 sizeof(struct ice_rx_entry) * len,
1142 RTE_CACHE_LINE_SIZE,
1144 if (!rxq->sw_ring) {
1145 ice_rx_queue_release(rxq);
1146 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1150 ice_reset_rx_queue(rxq);
1152 dev->data->rx_queues[queue_idx] = rxq;
1153 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1155 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1157 if (!use_def_burst_func) {
1158 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1159 "satisfied. Rx Burst Bulk Alloc function will be "
1160 "used on port=%d, queue=%d.",
1161 rxq->port_id, rxq->queue_id);
1163 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1164 "not satisfied, Scattered Rx is requested. "
1165 "on port=%d, queue=%d.",
1166 rxq->port_id, rxq->queue_id);
1167 ad->rx_bulk_alloc_allowed = false;
1174 ice_rx_queue_release(void *rxq)
1176 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1179 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1184 rte_free(q->sw_ring);
1189 ice_tx_queue_setup(struct rte_eth_dev *dev,
1192 unsigned int socket_id,
1193 const struct rte_eth_txconf *tx_conf)
1195 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1196 struct ice_vsi *vsi = pf->main_vsi;
1197 struct ice_tx_queue *txq;
1198 const struct rte_memzone *tz;
1200 uint16_t tx_rs_thresh, tx_free_thresh;
1203 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1205 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1206 nb_desc > ICE_MAX_RING_DESC ||
1207 nb_desc < ICE_MIN_RING_DESC) {
1208 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1209 "invalid", nb_desc);
1214 * The following two parameters control the setting of the RS bit on
1215 * transmit descriptors. TX descriptors will have their RS bit set
1216 * after txq->tx_rs_thresh descriptors have been used. The TX
1217 * descriptor ring will be cleaned after txq->tx_free_thresh
1218 * descriptors are used or if the number of descriptors required to
1219 * transmit a packet is greater than the number of free TX descriptors.
1221 * The following constraints must be satisfied:
1222 * - tx_rs_thresh must be greater than 0.
1223 * - tx_rs_thresh must be less than the size of the ring minus 2.
1224 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1225 * - tx_rs_thresh must be a divisor of the ring size.
1226 * - tx_free_thresh must be greater than 0.
1227 * - tx_free_thresh must be less than the size of the ring minus 3.
1228 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1230 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1231 * race condition, hence the maximum threshold constraints. When set
1232 * to zero use default values.
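 *
 * Example (illustrative numbers): nb_desc = 512 with tx_rs_thresh = 32
 * and tx_free_thresh = 32 satisfies every constraint above: 32 > 0,
 * 32 < 510, 32 <= 32, 512 % 32 == 0, 32 < 509 and 32 + 32 <= 512.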
1234 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1235 tx_conf->tx_free_thresh :
1236 ICE_DEFAULT_TX_FREE_THRESH);
1237 /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh. */
1239 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1240 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1241 if (tx_conf->tx_rs_thresh)
1242 tx_rs_thresh = tx_conf->tx_rs_thresh;
1243 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1244 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1245 "exceed nb_desc. (tx_rs_thresh=%u "
1246 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1247 (unsigned int)tx_rs_thresh,
1248 (unsigned int)tx_free_thresh,
1249 (unsigned int)nb_desc,
1250 (int)dev->data->port_id,
1254 if (tx_rs_thresh >= (nb_desc - 2)) {
1255 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1256 "number of TX descriptors minus 2. "
1257 "(tx_rs_thresh=%u port=%d queue=%d)",
1258 (unsigned int)tx_rs_thresh,
1259 (int)dev->data->port_id,
1263 if (tx_free_thresh >= (nb_desc - 3)) {
1264 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1265 "tx_free_thresh must be less than the "
1266 "number of TX descriptors minus 3. "
1267 "(tx_free_thresh=%u port=%d queue=%d)",
1268 (unsigned int)tx_free_thresh,
1269 (int)dev->data->port_id,
1273 if (tx_rs_thresh > tx_free_thresh) {
1274 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1275 "equal to tx_free_thresh. (tx_free_thresh=%u"
1276 " tx_rs_thresh=%u port=%d queue=%d)",
1277 (unsigned int)tx_free_thresh,
1278 (unsigned int)tx_rs_thresh,
1279 (int)dev->data->port_id,
1283 if ((nb_desc % tx_rs_thresh) != 0) {
1284 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1285 "number of TX descriptors. (tx_rs_thresh=%u"
1286 " port=%d queue=%d)",
1287 (unsigned int)tx_rs_thresh,
1288 (int)dev->data->port_id,
1292 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1293 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1294 "tx_rs_thresh is greater than 1. "
1295 "(tx_rs_thresh=%u port=%d queue=%d)",
1296 (unsigned int)tx_rs_thresh,
1297 (int)dev->data->port_id,
1302 /* Free memory if needed. */
1303 if (dev->data->tx_queues[queue_idx]) {
1304 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1305 dev->data->tx_queues[queue_idx] = NULL;
1308 /* Allocate the TX queue data structure. */
1309 txq = rte_zmalloc_socket(NULL,
1310 sizeof(struct ice_tx_queue),
1311 RTE_CACHE_LINE_SIZE,
1314 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1315 "tx queue structure");
1319 /* Allocate TX hardware ring descriptors. */
1320 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1321 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1322 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1323 ring_size, ICE_RING_BASE_ALIGN,
1326 ice_tx_queue_release(txq);
1327 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1331 txq->nb_tx_desc = nb_desc;
1332 txq->tx_rs_thresh = tx_rs_thresh;
1333 txq->tx_free_thresh = tx_free_thresh;
1334 txq->pthresh = tx_conf->tx_thresh.pthresh;
1335 txq->hthresh = tx_conf->tx_thresh.hthresh;
1336 txq->wthresh = tx_conf->tx_thresh.wthresh;
1337 txq->queue_id = queue_idx;
1339 txq->reg_idx = vsi->base_queue + queue_idx;
1340 txq->port_id = dev->data->port_id;
1341 txq->offloads = offloads;
1343 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1345 txq->tx_ring_dma = tz->iova;
1346 txq->tx_ring = tz->addr;
1348 /* Allocate software ring */
1350 rte_zmalloc_socket(NULL,
1351 sizeof(struct ice_tx_entry) * nb_desc,
1352 RTE_CACHE_LINE_SIZE,
1354 if (!txq->sw_ring) {
1355 ice_tx_queue_release(txq);
1356 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1360 ice_reset_tx_queue(txq);
1362 dev->data->tx_queues[queue_idx] = txq;
1363 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1364 ice_set_tx_function_flag(dev, txq);
1370 ice_tx_queue_release(void *txq)
1372 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1375 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1380 rte_free(q->sw_ring);
1385 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1386 struct rte_eth_rxq_info *qinfo)
1388 struct ice_rx_queue *rxq;
1390 rxq = dev->data->rx_queues[queue_id];
1392 qinfo->mp = rxq->mp;
1393 qinfo->scattered_rx = dev->data->scattered_rx;
1394 qinfo->nb_desc = rxq->nb_rx_desc;
1396 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1397 qinfo->conf.rx_drop_en = rxq->drop_en;
1398 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1402 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1403 struct rte_eth_txq_info *qinfo)
1405 struct ice_tx_queue *txq;
1407 txq = dev->data->tx_queues[queue_id];
1409 qinfo->nb_desc = txq->nb_tx_desc;
1411 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1412 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1413 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1415 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1416 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1417 qinfo->conf.offloads = txq->offloads;
1418 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1422 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1424 #define ICE_RXQ_SCAN_INTERVAL 4
1425 volatile union ice_rx_flex_desc *rxdp;
1426 struct ice_rx_queue *rxq;
1429 rxq = dev->data->rx_queues[rx_queue_id];
1430 rxdp = &rxq->rx_ring[rxq->rx_tail];
1431 while ((desc < rxq->nb_rx_desc) &&
1432 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1433 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1435 * Check the DD bit of one rx descriptor in each group of 4,
1436 * to avoid checking too frequently and degrading performance.
1439 desc += ICE_RXQ_SCAN_INTERVAL;
1440 rxdp += ICE_RXQ_SCAN_INTERVAL;
1441 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1442 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1443 desc - rxq->nb_rx_desc]);
1449 #define ICE_RX_FLEX_ERR0_BITS \
1450 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1451 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1452 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1453 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1454 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1455 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1457 /* Rx L3/L4 checksum */
1458 static inline uint64_t
1459 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1463 /* check if HW has decoded the packet and checksum */
1464 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1467 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1468 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1472 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1473 flags |= PKT_RX_IP_CKSUM_BAD;
1475 flags |= PKT_RX_IP_CKSUM_GOOD;
1477 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1478 flags |= PKT_RX_L4_CKSUM_BAD;
1480 flags |= PKT_RX_L4_CKSUM_GOOD;
1482 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1483 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1485 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1486 flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1488 flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1494 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1496 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1497 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1498 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1500 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1501 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1502 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1507 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1508 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1509 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1510 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1511 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1512 mb->vlan_tci_outer = mb->vlan_tci;
1513 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1514 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1515 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1516 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1518 mb->vlan_tci_outer = 0;
1521 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1522 mb->vlan_tci, mb->vlan_tci_outer);
1525 #define ICE_LOOK_AHEAD 8
1526 #if (ICE_LOOK_AHEAD != 8)
1527 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
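/* Scan up to ICE_RX_MAX_BURST descriptors, ICE_LOOK_AHEAD at a time,
 * translate the completed ones into mbufs and stage them for
 * ice_rx_fill_from_stage().
 */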
1530 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1532 volatile union ice_rx_flex_desc *rxdp;
1533 struct ice_rx_entry *rxep;
1534 struct rte_mbuf *mb;
1537 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1538 int32_t i, j, nb_rx = 0;
1539 uint64_t pkt_flags = 0;
1540 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1542 rxdp = &rxq->rx_ring[rxq->rx_tail];
1543 rxep = &rxq->sw_ring[rxq->rx_tail];
1545 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1547 /* Make sure there is at least 1 packet to receive */
1548 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1552 * Scan LOOK_AHEAD descriptors at a time to determine which
1553 * descriptors reference packets that are ready to be received.
1555 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1556 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1557 /* Read desc statuses backwards to avoid race condition */
1558 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1559 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1563 /* Compute how many status bits were set */
1564 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1565 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1569 /* Translate descriptor info to mbuf parameters */
1570 for (j = 0; j < nb_dd; j++) {
1572 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1573 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1574 mb->data_len = pkt_len;
1575 mb->pkt_len = pkt_len;
1577 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1578 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1579 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1580 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1581 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1582 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1584 mb->ol_flags |= pkt_flags;
1587 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1588 rxq->rx_stage[i + j] = rxep[j].mbuf;
1590 if (nb_dd != ICE_LOOK_AHEAD)
1594 /* Clear software ring entries */
1595 for (i = 0; i < nb_rx; i++)
1596 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1598 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1599 "port_id=%u, queue_id=%u, nb_rx=%d",
1600 rxq->port_id, rxq->queue_id, nb_rx);
1605 static inline uint16_t
1606 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1607 struct rte_mbuf **rx_pkts,
1611 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1613 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1615 for (i = 0; i < nb_pkts; i++)
1616 rx_pkts[i] = stage[i];
1618 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1619 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
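/* Refill the Rx ring: bulk-allocate rx_free_thresh mbufs, program them
 * into the descriptors, then advance the tail register and free trigger.
 */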
1625 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1627 volatile union ice_rx_flex_desc *rxdp;
1628 struct ice_rx_entry *rxep;
1629 struct rte_mbuf *mb;
1630 uint16_t alloc_idx, i;
1634 /* Allocate buffers in bulk */
1635 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1636 (rxq->rx_free_thresh - 1));
1637 rxep = &rxq->sw_ring[alloc_idx];
1638 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1639 rxq->rx_free_thresh);
1640 if (unlikely(diag != 0)) {
1641 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1645 rxdp = &rxq->rx_ring[alloc_idx];
1646 for (i = 0; i < rxq->rx_free_thresh; i++) {
1647 if (likely(i < (rxq->rx_free_thresh - 1)))
1648 /* Prefetch next mbuf */
1649 rte_prefetch0(rxep[i + 1].mbuf);
1652 rte_mbuf_refcnt_set(mb, 1);
1654 mb->data_off = RTE_PKTMBUF_HEADROOM;
1656 mb->port = rxq->port_id;
1657 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1658 rxdp[i].read.hdr_addr = 0;
1659 rxdp[i].read.pkt_addr = dma_addr;
1662 /* Update the rx tail register */
1663 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1665 rxq->rx_free_trigger =
1666 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1667 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1668 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
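/* Bulk-alloc Rx burst helper: serve staged packets first, then scan the
 * HW ring and refill it once the free trigger is crossed.
 */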
1673 static inline uint16_t
1674 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1676 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1678 struct rte_eth_dev *dev;
1683 if (rxq->rx_nb_avail)
1684 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1686 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1687 rxq->rx_next_avail = 0;
1688 rxq->rx_nb_avail = nb_rx;
1689 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1691 if (rxq->rx_tail > rxq->rx_free_trigger) {
1692 if (ice_rx_alloc_bufs(rxq) != 0) {
1695 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1696 dev->data->rx_mbuf_alloc_failed +=
1697 rxq->rx_free_thresh;
1698 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1699 "port_id=%u, queue_id=%u",
1700 rxq->port_id, rxq->queue_id);
1701 rxq->rx_nb_avail = 0;
1702 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1703 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1704 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1710 if (rxq->rx_tail >= rxq->nb_rx_desc)
1713 if (rxq->rx_nb_avail)
1714 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1720 ice_recv_pkts_bulk_alloc(void *rx_queue,
1721 struct rte_mbuf **rx_pkts,
1728 if (unlikely(nb_pkts == 0))
1731 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1732 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1735 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1736 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1737 nb_rx = (uint16_t)(nb_rx + count);
1738 nb_pkts = (uint16_t)(nb_pkts - count);
1747 ice_recv_scattered_pkts(void *rx_queue,
1748 struct rte_mbuf **rx_pkts,
1751 struct ice_rx_queue *rxq = rx_queue;
1752 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1753 volatile union ice_rx_flex_desc *rxdp;
1754 union ice_rx_flex_desc rxd;
1755 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1756 struct ice_rx_entry *rxe;
1757 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1758 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1759 struct rte_mbuf *nmb; /* new allocated mbuf */
1760 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1761 uint16_t rx_id = rxq->rx_tail;
1763 uint16_t nb_hold = 0;
1764 uint16_t rx_packet_len;
1765 uint16_t rx_stat_err0;
1768 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1769 struct rte_eth_dev *dev;
1771 while (nb_rx < nb_pkts) {
1772 rxdp = &rx_ring[rx_id];
1773 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1775 /* Check the DD bit first */
1776 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1780 nmb = rte_mbuf_raw_alloc(rxq->mp);
1781 if (unlikely(!nmb)) {
1782 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1783 dev->data->rx_mbuf_alloc_failed++;
1786 rxd = *rxdp; /* copy the ring descriptor to a temp variable */
1789 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1791 if (unlikely(rx_id == rxq->nb_rx_desc))
1794 /* Prefetch next mbuf */
1795 rte_prefetch0(sw_ring[rx_id].mbuf);
1798 * When the next RX descriptor is on a cache line boundary,
1799 * prefetch the next 4 RX descriptors and next 8 pointers
1802 if ((rx_id & 0x3) == 0) {
1803 rte_prefetch0(&rx_ring[rx_id]);
1804 rte_prefetch0(&sw_ring[rx_id]);
1810 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1812 /* Set data buffer address and data length of the mbuf */
1813 rxdp->read.hdr_addr = 0;
1814 rxdp->read.pkt_addr = dma_addr;
1815 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1816 ICE_RX_FLX_DESC_PKT_LEN_M;
1817 rxm->data_len = rx_packet_len;
1818 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1821 * If this is the first buffer of the received packet, set the
1822 * pointer to the first mbuf of the packet and initialize its
1823 * context. Otherwise, update the total length and the number
1824 * of segments of the current scattered packet, and update the
1825 * pointer to the last mbuf of the current packet.
1829 first_seg->nb_segs = 1;
1830 first_seg->pkt_len = rx_packet_len;
1832 first_seg->pkt_len =
1833 (uint16_t)(first_seg->pkt_len +
1835 first_seg->nb_segs++;
1836 last_seg->next = rxm;
1840 * If this is not the last buffer of the received packet,
1841 * update the pointer to the last mbuf of the current scattered
1842 * packet and continue to parse the RX ring.
1844 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1850 * This is the last buffer of the received packet. If the CRC
1851 * is not stripped by the hardware:
1852 * - Subtract the CRC length from the total packet length.
1853 * - If the last buffer only contains the whole CRC or a part
1854 * of it, free the mbuf associated to the last buffer. If part
1855 * of the CRC is also contained in the previous mbuf, subtract
1856 * the length of that CRC part from the data length of the
1860 if (unlikely(rxq->crc_len > 0)) {
1861 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1862 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1863 rte_pktmbuf_free_seg(rxm);
1864 first_seg->nb_segs--;
1865 last_seg->data_len =
1866 (uint16_t)(last_seg->data_len -
1867 (RTE_ETHER_CRC_LEN - rx_packet_len));
1868 last_seg->next = NULL;
1870 rxm->data_len = (uint16_t)(rx_packet_len -
1874 first_seg->port = rxq->port_id;
1875 first_seg->ol_flags = 0;
1876 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1877 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1878 ice_rxd_to_vlan_tci(first_seg, &rxd);
1879 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1880 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1881 first_seg->ol_flags |= pkt_flags;
1882 /* Prefetch data of first segment, if configured to do so. */
1883 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1884 first_seg->data_off));
1885 rx_pkts[nb_rx++] = first_seg;
1889 /* Record index of the next RX descriptor to probe. */
1890 rxq->rx_tail = rx_id;
1891 rxq->pkt_first_seg = first_seg;
1892 rxq->pkt_last_seg = last_seg;
1895 * If the number of free RX descriptors is greater than the RX free
1896 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1897 * register. Update the RDT with the value of the last processed RX
1898 * descriptor minus 1, to guarantee that the RDT register is never
1899 * equal to the RDH register, which creates a "full" ring situation
1900 * from the hardware point of view.
1902 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1903 if (nb_hold > rxq->rx_free_thresh) {
1904 rx_id = (uint16_t)(rx_id == 0 ?
1905 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1906 /* write TAIL register */
1907 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1910 rxq->nb_rx_hold = nb_hold;
1912 /* return received packet in the burst */
1917 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1919 struct ice_adapter *ad =
1920 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1921 const uint32_t *ptypes;
1923 static const uint32_t ptypes_os[] = {
1924 /* refers to ice_get_default_pkt_type() */
1926 RTE_PTYPE_L2_ETHER_TIMESYNC,
1927 RTE_PTYPE_L2_ETHER_LLDP,
1928 RTE_PTYPE_L2_ETHER_ARP,
1929 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1930 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1933 RTE_PTYPE_L4_NONFRAG,
1937 RTE_PTYPE_TUNNEL_GRENAT,
1938 RTE_PTYPE_TUNNEL_IP,
1939 RTE_PTYPE_INNER_L2_ETHER,
1940 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1941 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1942 RTE_PTYPE_INNER_L4_FRAG,
1943 RTE_PTYPE_INNER_L4_ICMP,
1944 RTE_PTYPE_INNER_L4_NONFRAG,
1945 RTE_PTYPE_INNER_L4_SCTP,
1946 RTE_PTYPE_INNER_L4_TCP,
1947 RTE_PTYPE_INNER_L4_UDP,
1951 static const uint32_t ptypes_comms[] = {
1952 /* refers to ice_get_default_pkt_type() */
1954 RTE_PTYPE_L2_ETHER_TIMESYNC,
1955 RTE_PTYPE_L2_ETHER_LLDP,
1956 RTE_PTYPE_L2_ETHER_ARP,
1957 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1958 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1961 RTE_PTYPE_L4_NONFRAG,
1965 RTE_PTYPE_TUNNEL_GRENAT,
1966 RTE_PTYPE_TUNNEL_IP,
1967 RTE_PTYPE_INNER_L2_ETHER,
1968 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1969 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1970 RTE_PTYPE_INNER_L4_FRAG,
1971 RTE_PTYPE_INNER_L4_ICMP,
1972 RTE_PTYPE_INNER_L4_NONFRAG,
1973 RTE_PTYPE_INNER_L4_SCTP,
1974 RTE_PTYPE_INNER_L4_TCP,
1975 RTE_PTYPE_INNER_L4_UDP,
1976 RTE_PTYPE_TUNNEL_GTPC,
1977 RTE_PTYPE_TUNNEL_GTPU,
1978 RTE_PTYPE_L2_ETHER_PPPOE,
1982 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1983 ptypes = ptypes_comms;
1987 if (dev->rx_pkt_burst == ice_recv_pkts ||
1988 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1989 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1993 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1994 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1995 #ifdef CC_AVX512_SUPPORT
1996 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
1997 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
1998 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
1999 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2001 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2002 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
2010 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2012 volatile union ice_rx_flex_desc *rxdp;
2013 struct ice_rx_queue *rxq = rx_queue;
2016 if (unlikely(offset >= rxq->nb_rx_desc))
2019 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2020 return RTE_ETH_RX_DESC_UNAVAIL;
2022 desc = rxq->rx_tail + offset;
2023 if (desc >= rxq->nb_rx_desc)
2024 desc -= rxq->nb_rx_desc;
2026 rxdp = &rxq->rx_ring[desc];
2027 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2028 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2029 return RTE_ETH_RX_DESC_DONE;
2031 return RTE_ETH_RX_DESC_AVAIL;
2035 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2037 struct ice_tx_queue *txq = tx_queue;
2038 volatile uint64_t *status;
2039 uint64_t mask, expect;
2042 if (unlikely(offset >= txq->nb_tx_desc))
2045 desc = txq->tx_tail + offset;
2046 /* go to next desc that has the RS bit */
2047 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * txq->tx_rs_thresh;
2049 if (desc >= txq->nb_tx_desc) {
2050 desc -= txq->nb_tx_desc;
2051 if (desc >= txq->nb_tx_desc)
2052 desc -= txq->nb_tx_desc;
2055 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2056 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2057 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2058 ICE_TXD_QW1_DTYPE_S);
2059 if ((*status & mask) == expect)
2060 return RTE_ETH_TX_DESC_DONE;
2062 return RTE_ETH_TX_DESC_FULL;
2066 ice_free_queues(struct rte_eth_dev *dev)
2070 PMD_INIT_FUNC_TRACE();
2072 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2073 if (!dev->data->rx_queues[i])
2075 ice_rx_queue_release(dev->data->rx_queues[i]);
2076 dev->data->rx_queues[i] = NULL;
2077 rte_eth_dma_zone_free(dev, "rx_ring", i);
2079 dev->data->nb_rx_queues = 0;
2081 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2082 if (!dev->data->tx_queues[i])
2084 ice_tx_queue_release(dev->data->tx_queues[i]);
2085 dev->data->tx_queues[i] = NULL;
2086 rte_eth_dma_zone_free(dev, "tx_ring", i);
2088 dev->data->nb_tx_queues = 0;
2091 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2092 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
2095 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2097 struct ice_tx_queue *txq;
2098 const struct rte_memzone *tz = NULL;
2100 struct rte_eth_dev *dev;
2103 PMD_DRV_LOG(ERR, "PF is not available");
2107 dev = pf->adapter->eth_dev;
2109 /* Allocate the TX queue data structure. */
2110 txq = rte_zmalloc_socket("ice fdir tx queue",
2111 sizeof(struct ice_tx_queue),
2112 RTE_CACHE_LINE_SIZE,
2115 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2116 "tx queue structure.");
2120 /* Allocate TX hardware ring descriptors. */
2121 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2122 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2124 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2125 ICE_FDIR_QUEUE_ID, ring_size,
2126 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2128 ice_tx_queue_release(txq);
2129 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2133 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2134 txq->queue_id = ICE_FDIR_QUEUE_ID;
2135 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2136 txq->vsi = pf->fdir.fdir_vsi;
2138 txq->tx_ring_dma = tz->iova;
2139 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2141 * No need to allocate a software ring or reset the fdir
2142 * program queue; just mark the queue as configured.
2147 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2153 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2155 struct ice_rx_queue *rxq;
2156 const struct rte_memzone *rz = NULL;
2158 struct rte_eth_dev *dev;
2161 PMD_DRV_LOG(ERR, "PF is not available");
2165 dev = pf->adapter->eth_dev;
2167 /* Allocate the RX queue data structure. */
2168 rxq = rte_zmalloc_socket("ice fdir rx queue",
2169 sizeof(struct ice_rx_queue),
2170 RTE_CACHE_LINE_SIZE,
2173 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2174 "rx queue structure.");
2178 /* Allocate RX hardware ring descriptors. */
2179 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2180 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2182 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2183 ICE_FDIR_QUEUE_ID, ring_size,
2184 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2186 ice_rx_queue_release(rxq);
2187 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2191 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2192 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2193 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2194 rxq->vsi = pf->fdir.fdir_vsi;
2196 rxq->rx_ring_dma = rz->iova;
2197 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2198 sizeof(union ice_32byte_rx_desc));
2199 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2202 * No need to allocate a software ring or reset the fdir
2203 * rx queue; just mark the queue as configured.
2208 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
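/* Default scalar Rx function: one mbuf per packet, no scatter support. */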
2214 ice_recv_pkts(void *rx_queue,
2215 struct rte_mbuf **rx_pkts,
2218 struct ice_rx_queue *rxq = rx_queue;
2219 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2220 volatile union ice_rx_flex_desc *rxdp;
2221 union ice_rx_flex_desc rxd;
2222 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2223 struct ice_rx_entry *rxe;
2224 struct rte_mbuf *nmb; /* new allocated mbuf */
2225 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2226 uint16_t rx_id = rxq->rx_tail;
2228 uint16_t nb_hold = 0;
2229 uint16_t rx_packet_len;
2230 uint16_t rx_stat_err0;
2233 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2234 struct rte_eth_dev *dev;
2236 while (nb_rx < nb_pkts) {
2237 rxdp = &rx_ring[rx_id];
2238 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2240 /* Check the DD bit first */
2241 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2245 nmb = rte_mbuf_raw_alloc(rxq->mp);
2246 if (unlikely(!nmb)) {
2247 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2248 dev->data->rx_mbuf_alloc_failed++;
2251 rxd = *rxdp; /* copy the ring descriptor to a temp variable */
2254 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2256 if (unlikely(rx_id == rxq->nb_rx_desc))
2261 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2264 * fill the read format of the descriptor with the physical
2265 * address of the newly allocated mbuf: nmb
2267 rxdp->read.hdr_addr = 0;
2268 rxdp->read.pkt_addr = dma_addr;
2270 /* calculate rx_packet_len of the received pkt */
2271 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2272 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2274 /* fill the old mbuf using the received descriptor: rxd */
2275 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2276 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2279 rxm->pkt_len = rx_packet_len;
2280 rxm->data_len = rx_packet_len;
2281 rxm->port = rxq->port_id;
2282 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2283 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2284 ice_rxd_to_vlan_tci(rxm, &rxd);
2285 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2286 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2287 rxm->ol_flags |= pkt_flags;
2288 /* copy old mbuf to rx_pkts */
2289 rx_pkts[nb_rx++] = rxm;
2291 rxq->rx_tail = rx_id;
2293 * If the number of free RX descriptors is greater than the RX free
2294 * threshold of the queue, advance the receive tail register of the
2295 * queue. Update that register with the value of the last processed
2296 * RX descriptor minus 1.
2298 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2299 if (nb_hold > rxq->rx_free_thresh) {
2300 rx_id = (uint16_t)(rx_id == 0 ?
2301 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2302 /* write TAIL register */
2303 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2306 rxq->nb_rx_hold = nb_hold;
2308 /* return received packet in the burst */
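/*
 * Fill the tunneling parameters of the Tx context descriptor:
 * EIPT (outer IP header type), EIPLEN (outer IP header length, in dwords),
 * L4TUNT (tunnel type) and L4TUNLEN (tunnel header length, in words).
 */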
2313 ice_parse_tunneling_params(uint64_t ol_flags,
2314 union ice_tx_offload tx_offload,
2315 uint32_t *cd_tunneling)
2317 /* EIPT: External (outer) IP header type */
2318 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2319 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2320 else if (ol_flags & PKT_TX_OUTER_IPV4)
2321 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2322 else if (ol_flags & PKT_TX_OUTER_IPV6)
2323 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2325 /* EIPLEN: External (outer) IP header length, in DWords */
2326 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2327 ICE_TXD_CTX_QW0_EIPLEN_S;
2329 /* L4TUNT: L4 Tunneling Type */
2330 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2331 case PKT_TX_TUNNEL_IPIP:
2332 /* for non-UDP/GRE tunneling, set to 00b */
2334 case PKT_TX_TUNNEL_VXLAN:
2335 case PKT_TX_TUNNEL_GTP:
2336 case PKT_TX_TUNNEL_GENEVE:
2337 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2339 case PKT_TX_TUNNEL_GRE:
2340 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2343 PMD_TX_LOG(ERR, "Tunnel type not supported");
2347 /* L4TUNLEN: L4 Tunneling Length, in Words
2349 * We depend on the application to set rte_mbuf.l2_len correctly.
2350 * For IP in GRE it should be set to the length of the GRE
2352 * header. For MAC in GRE or MAC in UDP it should be set to the
2353 * length of the GRE or UDP headers plus the inner MAC, up to and
2354 * including its last Ethertype.
2355 * If MPLS labels exist, they should be included as well.
2357 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2358 ICE_TXD_CTX_QW0_NATLEN_S;
2361 * Enable calculation of the tunneling UDP checksum.
2362 * Shall be set only if L4TUNT == 01b (UDP) and EIPT is non-zero.
2364 if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2365 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2366 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
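/*
 * Set the checksum offload command bits and header-length fields of the
 * Tx data descriptor. Note the units the hardware expects: MACLEN is in
 * words (l2_len >> 1), while IPLEN and L4LEN are in dwords (len >> 2).
 */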
2370 ice_txd_enable_checksum(uint64_t ol_flags,
2372 uint32_t *td_offset,
2373 union ice_tx_offload tx_offload)
2376 if (ol_flags & PKT_TX_TUNNEL_MASK)
2377 *td_offset |= (tx_offload.outer_l2_len >> 1)
2378 << ICE_TX_DESC_LEN_MACLEN_S;
2380 *td_offset |= (tx_offload.l2_len >> 1)
2381 << ICE_TX_DESC_LEN_MACLEN_S;
2383 /* Enable L3 checksum offloads */
2384 if (ol_flags & PKT_TX_IP_CKSUM) {
2385 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2386 *td_offset |= (tx_offload.l3_len >> 2) <<
2387 ICE_TX_DESC_LEN_IPLEN_S;
2388 } else if (ol_flags & PKT_TX_IPV4) {
2389 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2390 *td_offset |= (tx_offload.l3_len >> 2) <<
2391 ICE_TX_DESC_LEN_IPLEN_S;
2392 } else if (ol_flags & PKT_TX_IPV6) {
2393 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2394 *td_offset |= (tx_offload.l3_len >> 2) <<
2395 ICE_TX_DESC_LEN_IPLEN_S;
2398 if (ol_flags & PKT_TX_TCP_SEG) {
2399 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2400 *td_offset |= (tx_offload.l4_len >> 2) <<
2401 ICE_TX_DESC_LEN_L4_LEN_S;
2405 /* Enable L4 checksum offloads */
2406 switch (ol_flags & PKT_TX_L4_MASK) {
2407 case PKT_TX_TCP_CKSUM:
2408 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2409 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2410 ICE_TX_DESC_LEN_L4_LEN_S;
2412 case PKT_TX_SCTP_CKSUM:
2413 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2414 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2415 ICE_TX_DESC_LEN_L4_LEN_S;
2417 case PKT_TX_UDP_CKSUM:
2418 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2419 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2420 ICE_TX_DESC_LEN_L4_LEN_S;
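/*
 * Reclaim completed descriptors in bursts of tx_rs_thresh. The RS bit is
 * requested only once per tx_rs_thresh descriptors, so a DESC_DONE
 * writeback on that descriptor implies every descriptor before it is
 * done as well.
 */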
2428 ice_xmit_cleanup(struct ice_tx_queue *txq)
2430 struct ice_tx_entry *sw_ring = txq->sw_ring;
2431 volatile struct ice_tx_desc *txd = txq->tx_ring;
2432 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2433 uint16_t nb_tx_desc = txq->nb_tx_desc;
2434 uint16_t desc_to_clean_to;
2435 uint16_t nb_tx_to_clean;
2437 /* Determine the last descriptor needing to be cleaned */
2438 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2439 if (desc_to_clean_to >= nb_tx_desc)
2440 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2442 /* Check to make sure the last descriptor to clean is done */
2443 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2444 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2445 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2446 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2447 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2449 txq->port_id, txq->queue_id,
2450 txd[desc_to_clean_to].cmd_type_offset_bsz);
2451 /* Failed to clean any descriptors */
2455 /* Figure out how many descriptors will be cleaned */
2456 if (last_desc_cleaned > desc_to_clean_to)
2457 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2460 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2463 /* The last descriptor to clean is done, so that means all the
2464 * descriptors from the last descriptor that was cleaned
2465 * up to the last descriptor with the RS bit set
2466 * are done. Only reset the threshold descriptor.
2468 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2470 /* Update the txq to reflect the last descriptor that was cleaned */
2471 txq->last_desc_cleaned = desc_to_clean_to;
2472 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2477 /* Construct the Tx data descriptor cmd/type/offset/buf-size (ctob) quadword */
2478 static inline uint64_t
2479 ice_build_ctob(uint32_t td_cmd,
2484 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2485 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2486 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2487 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2488 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2491 /* Check if the context descriptor is needed for TX offloading */
2492 static inline uint16_t
2493 ice_calc_context_desc(uint64_t flags)
2495 static uint64_t mask = PKT_TX_TCP_SEG |
2497 PKT_TX_OUTER_IP_CKSUM |
2500 return (flags & mask) ? 1 : 0;
2503 /* set ice TSO context descriptor */
2504 static inline uint64_t
2505 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2507 uint64_t ctx_desc = 0;
2508 uint32_t cd_cmd, hdr_len, cd_tso_len;
2510 if (!tx_offload.l4_len) {
2511 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2515 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2516 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2517 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2519 cd_cmd = ICE_TX_CTX_DESC_TSO;
2520 cd_tso_len = mbuf->pkt_len - hdr_len;
2521 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2522 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2523 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
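/*
 * Note: cd_tso_len is the TSO payload length, i.e. pkt_len minus all
 * L2/L3/L4 headers (including the outer headers for tunnel packets);
 * the MSS field carries the per-segment payload size from tso_segsz.
 */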
2528 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2529 #define ICE_MAX_DATA_PER_TXD \
2530 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
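/*
 * Example: with ICE_MAX_DATA_PER_TXD == 16383 (16K - 1), a single 32 KB
 * mbuf segment needs DIV_ROUND_UP(32768, 16383) == 3 data descriptors.
 */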
2531 /* Calculate the number of TX descriptors needed for each pkt */
2532 static inline uint16_t
2533 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2535 struct rte_mbuf *txd = tx_pkt;
2538 while (txd != NULL) {
2539 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2547 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2549 struct ice_tx_queue *txq;
2550 volatile struct ice_tx_desc *tx_ring;
2551 volatile struct ice_tx_desc *txd;
2552 struct ice_tx_entry *sw_ring;
2553 struct ice_tx_entry *txe, *txn;
2554 struct rte_mbuf *tx_pkt;
2555 struct rte_mbuf *m_seg;
2556 uint32_t cd_tunneling_params;
2561 uint32_t td_cmd = 0;
2562 uint32_t td_offset = 0;
2563 uint32_t td_tag = 0;
2566 uint64_t buf_dma_addr;
2568 union ice_tx_offload tx_offload = {0};
2571 sw_ring = txq->sw_ring;
2572 tx_ring = txq->tx_ring;
2573 tx_id = txq->tx_tail;
2574 txe = &sw_ring[tx_id];
2576 /* Check if the descriptor ring needs to be cleaned. */
2577 if (txq->nb_tx_free < txq->tx_free_thresh)
2578 (void)ice_xmit_cleanup(txq);
2580 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2581 tx_pkt = *tx_pkts++;
2586 ol_flags = tx_pkt->ol_flags;
2587 tx_offload.l2_len = tx_pkt->l2_len;
2588 tx_offload.l3_len = tx_pkt->l3_len;
2589 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2590 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2591 tx_offload.l4_len = tx_pkt->l4_len;
2592 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2593 /* Calculate the number of context descriptors needed. */
2594 nb_ctx = ice_calc_context_desc(ol_flags);
2596 /* The number of descriptors that must be allocated for
2597 * a packet equals the number of segments of that packet
2598 * plus one context descriptor, if needed.
2599 * Recalculate the needed Tx descriptors when TSO is enabled,
2600 * in case an mbuf segment exceeds the max data size the HW allows
2603 if (ol_flags & PKT_TX_TCP_SEG)
2604 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2607 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2608 tx_last = (uint16_t)(tx_id + nb_used - 1);
2611 if (tx_last >= txq->nb_tx_desc)
2612 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2614 if (nb_used > txq->nb_tx_free) {
2615 if (ice_xmit_cleanup(txq) != 0) {
2620 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2621 while (nb_used > txq->nb_tx_free) {
2622 if (ice_xmit_cleanup(txq) != 0) {
2631 /* Descriptor based VLAN insertion */
2632 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2633 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2634 td_tag = tx_pkt->vlan_tci;
2637 /* Fill in tunneling parameters if necessary */
2638 cd_tunneling_params = 0;
2639 if (ol_flags & PKT_TX_TUNNEL_MASK)
2640 ice_parse_tunneling_params(ol_flags, tx_offload,
2641 &cd_tunneling_params);
2643 /* Enable checksum offloading */
2644 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2645 ice_txd_enable_checksum(ol_flags, &td_cmd,
2646 &td_offset, tx_offload);
2649 /* Setup TX context descriptor if required */
2650 volatile struct ice_tx_ctx_desc *ctx_txd =
2651 (volatile struct ice_tx_ctx_desc *)
2653 uint16_t cd_l2tag2 = 0;
2654 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2656 txn = &sw_ring[txe->next_id];
2657 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2659 rte_pktmbuf_free_seg(txe->mbuf);
2663 if (ol_flags & PKT_TX_TCP_SEG)
2664 cd_type_cmd_tso_mss |=
2665 ice_set_tso_ctx(tx_pkt, tx_offload);
2667 ctx_txd->tunneling_params =
2668 rte_cpu_to_le_32(cd_tunneling_params);
2670 /* TX context descriptor based double VLAN insert */
2671 if (ol_flags & PKT_TX_QINQ) {
2672 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2673 cd_type_cmd_tso_mss |=
2674 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2675 ICE_TXD_CTX_QW1_CMD_S);
2677 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2679 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2681 txe->last_id = tx_last;
2682 tx_id = txe->next_id;
2688 txd = &tx_ring[tx_id];
2689 txn = &sw_ring[txe->next_id];
2692 rte_pktmbuf_free_seg(txe->mbuf);
2695 /* Setup TX Descriptor */
2696 slen = m_seg->data_len;
2697 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2699 while ((ol_flags & PKT_TX_TCP_SEG) &&
2700 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2701 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2702 txd->cmd_type_offset_bsz =
2703 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2704 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2705 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2706 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2707 ICE_TXD_QW1_TX_BUF_SZ_S) |
2708 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2710 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2711 slen -= ICE_MAX_DATA_PER_TXD;
2713 txe->last_id = tx_last;
2714 tx_id = txe->next_id;
2716 txd = &tx_ring[tx_id];
2717 txn = &sw_ring[txe->next_id];
2720 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2721 txd->cmd_type_offset_bsz =
2722 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2723 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2724 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2725 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2726 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2728 txe->last_id = tx_last;
2729 tx_id = txe->next_id;
2731 m_seg = m_seg->next;
2734 /* set the End of Packet (EOP) bit on the last descriptor */
2735 td_cmd |= ICE_TX_DESC_CMD_EOP;
2736 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2737 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2739 /* set RS bit on the last descriptor of one packet */
2740 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2742 "Setting RS bit on TXD id="
2743 "%4u (port=%d queue=%d)",
2744 tx_last, txq->port_id, txq->queue_id);
2746 td_cmd |= ICE_TX_DESC_CMD_RS;
2748 /* Update txq RS bit counters */
2749 txq->nb_tx_used = 0;
2751 txd->cmd_type_offset_bsz |=
2752 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2756 /* update Tail register */
2757 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2758 txq->tx_tail = tx_id;
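/*
 * Fast buffer reclaim for the simple Tx path: once the descriptor at
 * tx_next_dd reports DESC_DONE, a whole tx_rs_thresh-sized block of mbufs
 * can be released at once. With MBUF_FAST_FREE the mbufs go straight back
 * to their mempool, skipping the per-segment free.
 */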
2763 static __rte_always_inline int
2764 ice_tx_free_bufs(struct ice_tx_queue *txq)
2766 struct ice_tx_entry *txep;
2769 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2770 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2771 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2774 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2776 for (i = 0; i < txq->tx_rs_thresh; i++)
2777 rte_prefetch0((txep + i)->mbuf);
2779 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2780 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2781 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2785 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2786 rte_pktmbuf_free_seg(txep->mbuf);
2791 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2792 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2793 if (txq->tx_next_dd >= txq->nb_tx_desc)
2794 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2796 return txq->tx_rs_thresh;
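/*
 * rte_eth_tx_done_cleanup() backend for the full Tx path: walk the SW
 * ring starting after tx_tail, freeing completed mbufs until free_cnt
 * packets have been released or no more descriptors can be reclaimed by
 * ice_xmit_cleanup().
 */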
2800 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2803 struct ice_tx_entry *swr_ring = txq->sw_ring;
2804 uint16_t i, tx_last, tx_id;
2805 uint16_t nb_tx_free_last;
2806 uint16_t nb_tx_to_clean;
2809 /* Start freeing mbufs from the entry after tx_tail */
2810 tx_last = txq->tx_tail;
2811 tx_id = swr_ring[tx_last].next_id;
2813 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2816 nb_tx_to_clean = txq->nb_tx_free;
2817 nb_tx_free_last = txq->nb_tx_free;
2819 free_cnt = txq->nb_tx_desc;
2821 /* Loop through swr_ring to count the number of
2822 * freeable mbufs and packets.
2824 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2825 for (i = 0; i < nb_tx_to_clean &&
2826 pkt_cnt < free_cnt &&
2827 tx_id != tx_last; i++) {
2828 if (swr_ring[tx_id].mbuf != NULL) {
2829 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2830 swr_ring[tx_id].mbuf = NULL;
2833 * if this is the last segment in the packet,
2834 * increment the packet count
2836 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2839 tx_id = swr_ring[tx_id].next_id;
2842 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2843 txq->nb_tx_free || tx_id == tx_last)
2846 if (pkt_cnt < free_cnt) {
2847 if (ice_xmit_cleanup(txq))
2850 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2851 nb_tx_free_last = txq->nb_tx_free;
2855 return (int)pkt_cnt;
2860 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2861 uint32_t free_cnt __rte_unused)
2868 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2873 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2874 free_cnt = txq->nb_tx_desc;
2876 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2878 for (i = 0; i < cnt; i += n) {
2879 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2882 n = ice_tx_free_bufs(txq);
2892 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2894 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2895 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2896 struct ice_adapter *ad =
2897 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2900 if (ad->tx_vec_allowed)
2901 return ice_tx_done_cleanup_vec(q, free_cnt);
2903 if (ad->tx_simple_allowed)
2904 return ice_tx_done_cleanup_simple(q, free_cnt);
2906 return ice_tx_done_cleanup_full(q, free_cnt);
2909 /* Populate 4 descriptors with data from 4 mbufs */
2911 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2916 for (i = 0; i < 4; i++, txdp++, pkts++) {
2917 dma_addr = rte_mbuf_data_iova(*pkts);
2918 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2919 txdp->cmd_type_offset_bsz =
2920 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2921 (*pkts)->data_len, 0);
2925 /* Populate 1 descriptor with data from 1 mbuf */
2927 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2931 dma_addr = rte_mbuf_data_iova(*pkts);
2932 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2933 txdp->cmd_type_offset_bsz =
2934 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2935 (*pkts)->data_len, 0);
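/*
 * Fill the HW ring from an mbuf array, using tx4() for the bulk of the
 * burst and tx1() for the tail. For example, a burst of 10 packets is
 * split into mainpart = 8 (two tx4() calls) and leftover = 2 (two tx1()
 * calls).
 */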
2939 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2942 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2943 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2944 const int N_PER_LOOP = 4;
2945 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2946 int mainpart, leftover;
2950 * Process most of the packets in chunks of N pkts. Any
2951 * leftover packets will get processed one at a time.
2953 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2954 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2955 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2956 /* Copy N mbuf pointers to the S/W ring */
2957 for (j = 0; j < N_PER_LOOP; ++j)
2958 (txep + i + j)->mbuf = *(pkts + i + j);
2959 tx4(txdp + i, pkts + i);
2962 if (unlikely(leftover > 0)) {
2963 for (i = 0; i < leftover; ++i) {
2964 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2965 tx1(txdp + mainpart + i, pkts + mainpart + i);
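/*
 * Simple Tx burst: no context descriptors, one data descriptor per
 * packet. A burst that would run past the end of the ring is split into
 * two fills, and the RS bit is requested once per tx_rs_thresh
 * descriptors so that completions can be harvested in blocks by
 * ice_tx_free_bufs().
 */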
2970 static inline uint16_t
2971 tx_xmit_pkts(struct ice_tx_queue *txq,
2972 struct rte_mbuf **tx_pkts,
2975 volatile struct ice_tx_desc *txr = txq->tx_ring;
2979 * Begin scanning the H/W ring for done descriptors when the number
2980 * of available descriptors drops below tx_free_thresh. For each done
2981 * descriptor, free the associated buffer.
2983 if (txq->nb_tx_free < txq->tx_free_thresh)
2984 ice_tx_free_bufs(txq);
2986 /* Use only the descriptors that are available */
2987 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2988 if (unlikely(!nb_pkts))
2991 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2992 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2993 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2994 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2995 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2996 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2998 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3002 /* Fill hardware descriptor ring with mbuf data */
3003 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3004 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3006 /* Determine if the RS bit needs to be set */
3007 if (txq->tx_tail > txq->tx_next_rs) {
3008 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3009 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3012 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3013 if (txq->tx_next_rs >= txq->nb_tx_desc)
3014 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3017 if (txq->tx_tail >= txq->nb_tx_desc)
3020 /* Update the tx tail register */
3021 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
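/*
 * Wrapper around tx_xmit_pkts(): bursts larger than ICE_TX_MAX_BURST are
 * split and transmitted chunk by chunk.
 */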
3027 ice_xmit_pkts_simple(void *tx_queue,
3028 struct rte_mbuf **tx_pkts,
3033 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3034 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3038 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3041 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3042 &tx_pkts[nb_tx], num);
3043 nb_tx = (uint16_t)(nb_tx + ret);
3044 nb_pkts = (uint16_t)(nb_pkts - ret);
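/*
 * Select the Rx burst function. In the primary process the choice is made
 * from the device/queue capabilities and the CPU SIMD level (AVX512, then
 * AVX2, then SSE); secondary processes reuse the decision recorded in the
 * shared adapter data. The scalar scattered, bulk-alloc and normal paths
 * are the fallbacks when vectorization is not allowed.
 */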
3053 ice_set_rx_function(struct rte_eth_dev *dev)
3055 PMD_INIT_FUNC_TRACE();
3056 struct ice_adapter *ad =
3057 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3059 struct ice_rx_queue *rxq;
3062 bool use_avx512 = false;
3063 bool use_avx2 = false;
3065 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3066 rx_check_ret = ice_rx_vec_dev_check(dev);
3067 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3068 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3069 ad->rx_vec_allowed = true;
3070 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3071 rxq = dev->data->rx_queues[i];
3072 if (rxq && ice_rxq_vec_setup(rxq)) {
3073 ad->rx_vec_allowed = false;
3078 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3079 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3080 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3081 #ifdef CC_AVX512_SUPPORT
3085 "AVX512 is not supported in build env");
3088 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3089 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3090 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3094 ad->rx_vec_allowed = false;
3098 if (ad->rx_vec_allowed) {
3099 if (dev->data->scattered_rx) {
3101 #ifdef CC_AVX512_SUPPORT
3102 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3104 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3105 dev->data->port_id);
3107 ice_recv_scattered_pkts_vec_avx512_offload;
3110 "Using AVX512 Vector Scattered Rx (port %d).",
3111 dev->data->port_id);
3113 ice_recv_scattered_pkts_vec_avx512;
3118 "Using %sVector Scattered Rx (port %d).",
3119 use_avx2 ? "avx2 " : "",
3120 dev->data->port_id);
3121 dev->rx_pkt_burst = use_avx2 ?
3122 ice_recv_scattered_pkts_vec_avx2 :
3123 ice_recv_scattered_pkts_vec;
3127 #ifdef CC_AVX512_SUPPORT
3128 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3130 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3131 dev->data->port_id);
3133 ice_recv_pkts_vec_avx512_offload;
3136 "Using AVX512 Vector Rx (port %d).",
3137 dev->data->port_id);
3139 ice_recv_pkts_vec_avx512;
3144 "Using %sVector Rx (port %d).",
3145 use_avx2 ? "avx2 " : "",
3146 dev->data->port_id);
3147 dev->rx_pkt_burst = use_avx2 ?
3148 ice_recv_pkts_vec_avx2 :
3157 if (dev->data->scattered_rx) {
3158 /* Set the non-LRO scattered function */
3160 "Using a Scattered function on port %d.",
3161 dev->data->port_id);
3162 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3163 } else if (ad->rx_bulk_alloc_allowed) {
3165 "Rx Burst Bulk Alloc Preconditions are "
3166 "satisfied. Rx Burst Bulk Alloc function "
3167 "will be used on port %d.",
3168 dev->data->port_id);
3169 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3172 "Rx Burst Bulk Alloc Preconditions are not "
3173 "satisfied, Normal Rx will be used on port %d.",
3174 dev->data->port_id);
3175 dev->rx_pkt_burst = ice_recv_pkts;
3179 static const struct {
3180 eth_rx_burst_t pkt_burst;
3182 } ice_rx_burst_infos[] = {
3183 { ice_recv_scattered_pkts, "Scalar Scattered" },
3184 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3185 { ice_recv_pkts, "Scalar" },
3187 #ifdef CC_AVX512_SUPPORT
3188 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3189 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3190 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3191 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3193 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3194 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3195 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3196 { ice_recv_pkts_vec, "Vector SSE" },
3201 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3202 struct rte_eth_burst_mode *mode)
3204 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3208 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3209 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3210 snprintf(mode->info, sizeof(mode->info), "%s",
3211 ice_rx_burst_infos[i].info);
3221 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3223 struct ice_adapter *ad =
3224 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3226 /* Use a simple Tx queue if possible (only fast free is allowed) */
3227 ad->tx_simple_allowed =
3229 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3230 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3232 if (ad->tx_simple_allowed)
3233 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3237 "Simple Tx can NOT be enabled on Tx queue %u.",
3241 /*********************************************************************
 *
 *  TX prep functions
 *
3245 **********************************************************************/
3246 /* The supported range of TSO MSS */
3247 #define ICE_MIN_TSO_MSS 64
3248 #define ICE_MAX_TSO_MSS 9728
3249 #define ICE_MAX_TSO_FRAME_SIZE 262144
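/*
 * Tx prepare callback for the normal path: reject TSO requests whose MSS
 * lies outside [ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS] or whose frame exceeds
 * ICE_MAX_TSO_FRAME_SIZE, then run the generic Intel checksum preparation
 * (and, in debug builds, rte_validate_tx_offload()).
 */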
3251 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3258 for (i = 0; i < nb_pkts; i++) {
3260 ol_flags = m->ol_flags;
3262 if (ol_flags & PKT_TX_TCP_SEG &&
3263 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3264 m->tso_segsz > ICE_MAX_TSO_MSS ||
3265 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3267 * MSS values outside this range are considered malicious
3273 #ifdef RTE_ETHDEV_DEBUG_TX
3274 ret = rte_validate_tx_offload(m);
3280 ret = rte_net_intel_cksum_prepare(m);
3290 ice_set_tx_function(struct rte_eth_dev *dev)
3292 struct ice_adapter *ad =
3293 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3295 struct ice_tx_queue *txq;
3298 bool use_avx512 = false;
3299 bool use_avx2 = false;
3301 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3302 tx_check_ret = ice_tx_vec_dev_check(dev);
3303 if (tx_check_ret >= 0 &&
3304 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3305 ad->tx_vec_allowed = true;
3306 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3307 txq = dev->data->tx_queues[i];
3308 if (txq && ice_txq_vec_setup(txq)) {
3309 ad->tx_vec_allowed = false;
3314 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3315 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3316 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3317 #ifdef CC_AVX512_SUPPORT
3321 "AVX512 is not supported in build env");
3323 if (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&
3324 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3325 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3326 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3329 if (!use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3330 ad->tx_vec_allowed = false;
3333 ad->tx_vec_allowed = false;
3337 if (ad->tx_vec_allowed) {
3339 #ifdef CC_AVX512_SUPPORT
3340 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3342 "Using AVX512 OFFLOAD Vector Tx (port %d).",
3343 dev->data->port_id);
3345 ice_xmit_pkts_vec_avx512_offload;
3348 "Using AVX512 Vector Tx (port %d).",
3349 dev->data->port_id);
3350 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3354 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3355 use_avx2 ? "avx2 " : "",
3356 dev->data->port_id);
3357 dev->tx_pkt_burst = use_avx2 ?
3358 ice_xmit_pkts_vec_avx2 :
3361 dev->tx_pkt_prepare = NULL;
3367 if (ad->tx_simple_allowed) {
3368 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3369 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3370 dev->tx_pkt_prepare = NULL;
3372 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3373 dev->tx_pkt_burst = ice_xmit_pkts;
3374 dev->tx_pkt_prepare = ice_prep_pkts;
3378 static const struct {
3379 eth_tx_burst_t pkt_burst;
3381 } ice_tx_burst_infos[] = {
3382 { ice_xmit_pkts_simple, "Scalar Simple" },
3383 { ice_xmit_pkts, "Scalar" },
3385 #ifdef CC_AVX512_SUPPORT
3386 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3387 { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3389 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3390 { ice_xmit_pkts_vec, "Vector SSE" },
3395 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3396 struct rte_eth_burst_mode *mode)
3398 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3402 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3403 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3404 snprintf(mode->info, sizeof(mode->info), "%s",
3405 ice_tx_burst_infos[i].info);
3414 /* The hardware datasheet gives more detail on what each value means.
3416 * @note: fix ice_dev_supported_ptypes_get() if any change here.
3418 static inline uint32_t
3419 ice_get_default_pkt_type(uint16_t ptype)
3421 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3422 __rte_cache_aligned = {
3425 [1] = RTE_PTYPE_L2_ETHER,
3426 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3427 /* [3] - [5] reserved */
3428 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3429 /* [7] - [10] reserved */
3430 [11] = RTE_PTYPE_L2_ETHER_ARP,
3431 /* [12] - [21] reserved */
3433 /* Non tunneled IPv4 */
3434 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3436 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3437 RTE_PTYPE_L4_NONFRAG,
3438 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3441 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3443 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3445 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3449 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3450 RTE_PTYPE_TUNNEL_IP |
3451 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3452 RTE_PTYPE_INNER_L4_FRAG,
3453 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3454 RTE_PTYPE_TUNNEL_IP |
3455 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3456 RTE_PTYPE_INNER_L4_NONFRAG,
3457 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3458 RTE_PTYPE_TUNNEL_IP |
3459 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3460 RTE_PTYPE_INNER_L4_UDP,
3462 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3463 RTE_PTYPE_TUNNEL_IP |
3464 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3465 RTE_PTYPE_INNER_L4_TCP,
3466 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3467 RTE_PTYPE_TUNNEL_IP |
3468 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3469 RTE_PTYPE_INNER_L4_SCTP,
3470 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3471 RTE_PTYPE_TUNNEL_IP |
3472 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3473 RTE_PTYPE_INNER_L4_ICMP,
3476 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3477 RTE_PTYPE_TUNNEL_IP |
3478 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3479 RTE_PTYPE_INNER_L4_FRAG,
3480 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3481 RTE_PTYPE_TUNNEL_IP |
3482 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3483 RTE_PTYPE_INNER_L4_NONFRAG,
3484 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3485 RTE_PTYPE_TUNNEL_IP |
3486 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3487 RTE_PTYPE_INNER_L4_UDP,
3489 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3490 RTE_PTYPE_TUNNEL_IP |
3491 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3492 RTE_PTYPE_INNER_L4_TCP,
3493 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3494 RTE_PTYPE_TUNNEL_IP |
3495 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3496 RTE_PTYPE_INNER_L4_SCTP,
3497 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3498 RTE_PTYPE_TUNNEL_IP |
3499 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3500 RTE_PTYPE_INNER_L4_ICMP,
3502 /* IPv4 --> GRE/Teredo/VXLAN */
3503 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3504 RTE_PTYPE_TUNNEL_GRENAT,
3506 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3507 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3508 RTE_PTYPE_TUNNEL_GRENAT |
3509 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3510 RTE_PTYPE_INNER_L4_FRAG,
3511 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3512 RTE_PTYPE_TUNNEL_GRENAT |
3513 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3514 RTE_PTYPE_INNER_L4_NONFRAG,
3515 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3516 RTE_PTYPE_TUNNEL_GRENAT |
3517 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3518 RTE_PTYPE_INNER_L4_UDP,
3520 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3521 RTE_PTYPE_TUNNEL_GRENAT |
3522 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3523 RTE_PTYPE_INNER_L4_TCP,
3524 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3525 RTE_PTYPE_TUNNEL_GRENAT |
3526 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3527 RTE_PTYPE_INNER_L4_SCTP,
3528 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3529 RTE_PTYPE_TUNNEL_GRENAT |
3530 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3531 RTE_PTYPE_INNER_L4_ICMP,
3533 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3534 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3535 RTE_PTYPE_TUNNEL_GRENAT |
3536 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3537 RTE_PTYPE_INNER_L4_FRAG,
3538 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3539 RTE_PTYPE_TUNNEL_GRENAT |
3540 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3541 RTE_PTYPE_INNER_L4_NONFRAG,
3542 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3543 RTE_PTYPE_TUNNEL_GRENAT |
3544 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3545 RTE_PTYPE_INNER_L4_UDP,
3547 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3548 RTE_PTYPE_TUNNEL_GRENAT |
3549 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3550 RTE_PTYPE_INNER_L4_TCP,
3551 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3552 RTE_PTYPE_TUNNEL_GRENAT |
3553 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3554 RTE_PTYPE_INNER_L4_SCTP,
3555 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3556 RTE_PTYPE_TUNNEL_GRENAT |
3557 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3558 RTE_PTYPE_INNER_L4_ICMP,
3560 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3561 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3562 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3564 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3565 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3566 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3567 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3568 RTE_PTYPE_INNER_L4_FRAG,
3569 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3570 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3571 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3572 RTE_PTYPE_INNER_L4_NONFRAG,
3573 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3574 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3575 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3576 RTE_PTYPE_INNER_L4_UDP,
3578 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3579 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3580 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3581 RTE_PTYPE_INNER_L4_TCP,
3582 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3583 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3584 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3585 RTE_PTYPE_INNER_L4_SCTP,
3586 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3587 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3588 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3589 RTE_PTYPE_INNER_L4_ICMP,
3591 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3592 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3593 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3594 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3595 RTE_PTYPE_INNER_L4_FRAG,
3596 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3597 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3598 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3599 RTE_PTYPE_INNER_L4_NONFRAG,
3600 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3601 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3602 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3603 RTE_PTYPE_INNER_L4_UDP,
3605 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3606 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3607 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3608 RTE_PTYPE_INNER_L4_TCP,
3609 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3610 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3611 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3612 RTE_PTYPE_INNER_L4_SCTP,
3613 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3614 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3615 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3616 RTE_PTYPE_INNER_L4_ICMP,
3617 /* [73] - [87] reserved */
3619 /* Non tunneled IPv6 */
3620 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3622 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3623 RTE_PTYPE_L4_NONFRAG,
3624 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3627 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3629 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3631 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3635 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3636 RTE_PTYPE_TUNNEL_IP |
3637 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3638 RTE_PTYPE_INNER_L4_FRAG,
3639 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3640 RTE_PTYPE_TUNNEL_IP |
3641 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3642 RTE_PTYPE_INNER_L4_NONFRAG,
3643 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3644 RTE_PTYPE_TUNNEL_IP |
3645 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3646 RTE_PTYPE_INNER_L4_UDP,
3648 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3649 RTE_PTYPE_TUNNEL_IP |
3650 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3651 RTE_PTYPE_INNER_L4_TCP,
3652 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3653 RTE_PTYPE_TUNNEL_IP |
3654 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3655 RTE_PTYPE_INNER_L4_SCTP,
3656 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3657 RTE_PTYPE_TUNNEL_IP |
3658 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3659 RTE_PTYPE_INNER_L4_ICMP,
3662 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3663 RTE_PTYPE_TUNNEL_IP |
3664 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3665 RTE_PTYPE_INNER_L4_FRAG,
3666 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3667 RTE_PTYPE_TUNNEL_IP |
3668 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3669 RTE_PTYPE_INNER_L4_NONFRAG,
3670 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3671 RTE_PTYPE_TUNNEL_IP |
3672 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3673 RTE_PTYPE_INNER_L4_UDP,
3674 /* [105] reserved */
3675 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3676 RTE_PTYPE_TUNNEL_IP |
3677 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3678 RTE_PTYPE_INNER_L4_TCP,
3679 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3680 RTE_PTYPE_TUNNEL_IP |
3681 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3682 RTE_PTYPE_INNER_L4_SCTP,
3683 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3684 RTE_PTYPE_TUNNEL_IP |
3685 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3686 RTE_PTYPE_INNER_L4_ICMP,
3688 /* IPv6 --> GRE/Teredo/VXLAN */
3689 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3690 RTE_PTYPE_TUNNEL_GRENAT,
3692 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3693 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3694 RTE_PTYPE_TUNNEL_GRENAT |
3695 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3696 RTE_PTYPE_INNER_L4_FRAG,
3697 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_TUNNEL_GRENAT |
3699 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3700 RTE_PTYPE_INNER_L4_NONFRAG,
3701 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3702 RTE_PTYPE_TUNNEL_GRENAT |
3703 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3704 RTE_PTYPE_INNER_L4_UDP,
3705 /* [113] reserved */
3706 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3707 RTE_PTYPE_TUNNEL_GRENAT |
3708 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3709 RTE_PTYPE_INNER_L4_TCP,
3710 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3711 RTE_PTYPE_TUNNEL_GRENAT |
3712 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3713 RTE_PTYPE_INNER_L4_SCTP,
3714 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3715 RTE_PTYPE_TUNNEL_GRENAT |
3716 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3717 RTE_PTYPE_INNER_L4_ICMP,
3719 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3720 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3721 RTE_PTYPE_TUNNEL_GRENAT |
3722 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3723 RTE_PTYPE_INNER_L4_FRAG,
3724 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3725 RTE_PTYPE_TUNNEL_GRENAT |
3726 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3727 RTE_PTYPE_INNER_L4_NONFRAG,
3728 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3729 RTE_PTYPE_TUNNEL_GRENAT |
3730 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3731 RTE_PTYPE_INNER_L4_UDP,
3732 /* [120] reserved */
3733 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3734 RTE_PTYPE_TUNNEL_GRENAT |
3735 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3736 RTE_PTYPE_INNER_L4_TCP,
3737 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3738 RTE_PTYPE_TUNNEL_GRENAT |
3739 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3740 RTE_PTYPE_INNER_L4_SCTP,
3741 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3742 RTE_PTYPE_TUNNEL_GRENAT |
3743 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3744 RTE_PTYPE_INNER_L4_ICMP,
3746 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3747 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3748 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3750 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3751 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3752 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3753 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3754 RTE_PTYPE_INNER_L4_FRAG,
3755 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3756 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3757 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3758 RTE_PTYPE_INNER_L4_NONFRAG,
3759 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3760 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3761 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3762 RTE_PTYPE_INNER_L4_UDP,
3763 /* [128] reserved */
3764 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3765 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3766 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3767 RTE_PTYPE_INNER_L4_TCP,
3768 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3769 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3770 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3771 RTE_PTYPE_INNER_L4_SCTP,
3772 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3773 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3774 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3775 RTE_PTYPE_INNER_L4_ICMP,
3777 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3778 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3779 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3780 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3781 RTE_PTYPE_INNER_L4_FRAG,
3782 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3783 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3784 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3785 RTE_PTYPE_INNER_L4_NONFRAG,
3786 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3787 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3788 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3789 RTE_PTYPE_INNER_L4_UDP,
3790 /* [135] reserved */
3791 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3792 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3793 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3794 RTE_PTYPE_INNER_L4_TCP,
3795 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3796 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3797 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3798 RTE_PTYPE_INNER_L4_SCTP,
3799 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3800 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3801 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3802 RTE_PTYPE_INNER_L4_ICMP,
3803 /* [139] - [299] reserved */
3806 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3807 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3809 /* PPPoE --> IPv4 */
3810 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3811 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3813 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3814 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3815 RTE_PTYPE_L4_NONFRAG,
3816 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3817 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3819 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3820 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3822 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3823 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3825 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3826 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3829 /* PPPoE --> IPv6 */
3830 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3831 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3833 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3834 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3835 RTE_PTYPE_L4_NONFRAG,
3836 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3837 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3839 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3840 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3842 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3843 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3845 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3846 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3848 /* [314] - [324] reserved */
3850 /* IPv4/IPv6 --> GTPC/GTPU */
3851 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3852 RTE_PTYPE_TUNNEL_GTPC,
3853 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3854 RTE_PTYPE_TUNNEL_GTPC,
3855 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3856 RTE_PTYPE_TUNNEL_GTPC,
3857 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3858 RTE_PTYPE_TUNNEL_GTPC,
3859 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3860 RTE_PTYPE_TUNNEL_GTPU,
3861 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3862 RTE_PTYPE_TUNNEL_GTPU,
3864 /* IPv4 --> GTPU --> IPv4 */
3865 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3866 RTE_PTYPE_TUNNEL_GTPU |
3867 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3868 RTE_PTYPE_INNER_L4_FRAG,
3869 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3870 RTE_PTYPE_TUNNEL_GTPU |
3871 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3872 RTE_PTYPE_INNER_L4_NONFRAG,
3873 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3874 RTE_PTYPE_TUNNEL_GTPU |
3875 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3876 RTE_PTYPE_INNER_L4_UDP,
3877 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3878 RTE_PTYPE_TUNNEL_GTPU |
3879 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3880 RTE_PTYPE_INNER_L4_TCP,
3881 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3882 RTE_PTYPE_TUNNEL_GTPU |
3883 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3884 RTE_PTYPE_INNER_L4_ICMP,
3886 /* IPv6 --> GTPU --> IPv4 */
3887 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3888 RTE_PTYPE_TUNNEL_GTPU |
3889 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3890 RTE_PTYPE_INNER_L4_FRAG,
3891 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3892 RTE_PTYPE_TUNNEL_GTPU |
3893 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3894 RTE_PTYPE_INNER_L4_NONFRAG,
3895 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3896 RTE_PTYPE_TUNNEL_GTPU |
3897 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3898 RTE_PTYPE_INNER_L4_UDP,
3899 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3900 RTE_PTYPE_TUNNEL_GTPU |
3901 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3902 RTE_PTYPE_INNER_L4_TCP,
3903 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3904 RTE_PTYPE_TUNNEL_GTPU |
3905 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3906 RTE_PTYPE_INNER_L4_ICMP,
3908 /* IPv4 --> GTPU --> IPv6 */
3909 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3910 RTE_PTYPE_TUNNEL_GTPU |
3911 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3912 RTE_PTYPE_INNER_L4_FRAG,
3913 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3914 RTE_PTYPE_TUNNEL_GTPU |
3915 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3916 RTE_PTYPE_INNER_L4_NONFRAG,
3917 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3918 RTE_PTYPE_TUNNEL_GTPU |
3919 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3920 RTE_PTYPE_INNER_L4_UDP,
3921 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3922 RTE_PTYPE_TUNNEL_GTPU |
3923 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3924 RTE_PTYPE_INNER_L4_TCP,
3925 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3926 RTE_PTYPE_TUNNEL_GTPU |
3927 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3928 RTE_PTYPE_INNER_L4_ICMP,
3930 /* IPv6 --> GTPU --> IPv6 */
3931 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3932 RTE_PTYPE_TUNNEL_GTPU |
3933 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3934 RTE_PTYPE_INNER_L4_FRAG,
3935 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3936 RTE_PTYPE_TUNNEL_GTPU |
3937 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3938 RTE_PTYPE_INNER_L4_NONFRAG,
3939 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3940 RTE_PTYPE_TUNNEL_GTPU |
3941 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3942 RTE_PTYPE_INNER_L4_UDP,
3943 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3944 RTE_PTYPE_TUNNEL_GTPU |
3945 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3946 RTE_PTYPE_INNER_L4_TCP,
3947 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3948 RTE_PTYPE_TUNNEL_GTPU |
3949 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3950 RTE_PTYPE_INNER_L4_ICMP,
3952 /* IPv4 --> UDP ECPRI */
3953 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3955 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3957 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3959 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3961 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3963 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3965 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3967 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3969 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3971 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3974 /* IPv6 --> UDP ECPRI */
3975 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3977 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3979 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3981 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3983 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3985 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3987 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3989 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3991 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3993 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3995 /* All others reserved */
3998 return type_table[ptype];
4002 ice_set_default_ptype_table(struct rte_eth_dev *dev)
4004 struct ice_adapter *ad =
4005 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4008 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4009 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4012 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
4013 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
4014 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4015 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4016 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4018 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
4019 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
4020 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4021 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
4022 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
4023 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4026 * Check the programming status descriptor in the Rx queue,
4027 * written back after a Flow Director filter is programmed on the Tx queue.
4031 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4033 volatile union ice_32byte_rx_desc *rxdp;
4040 rxdp = (volatile union ice_32byte_rx_desc *)
4041 (&rxq->rx_ring[rxq->rx_tail]);
4042 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4043 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4044 >> ICE_RXD_QW1_STATUS_S;
4046 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4048 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4049 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4050 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4051 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4053 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4054 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4055 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4056 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4060 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4061 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4063 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4067 rxdp->wb.qword1.status_error_len = 0;
4069 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4071 if (rxq->rx_tail == 0)
4072 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4074 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4080 #define ICE_FDIR_MAX_WAIT_US 10000
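/*
 * Program one Flow Director filter: write the filter descriptor plus a
 * dummy data descriptor (EOP | RS | DUMMY) to the FDIR Tx queue, bump the
 * tail, then poll up to ICE_FDIR_MAX_WAIT_US iterations for the DD
 * writeback before checking the Rx programming status.
 */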
4083 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4085 struct ice_tx_queue *txq = pf->fdir.txq;
4086 struct ice_rx_queue *rxq = pf->fdir.rxq;
4087 volatile struct ice_fltr_desc *fdirdp;
4088 volatile struct ice_tx_desc *txdp;
4092 fdirdp = (volatile struct ice_fltr_desc *)
4093 (&txq->tx_ring[txq->tx_tail]);
4094 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4095 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4097 txdp = &txq->tx_ring[txq->tx_tail + 1];
4098 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4099 td_cmd = ICE_TX_DESC_CMD_EOP |
4100 ICE_TX_DESC_CMD_RS |
4101 ICE_TX_DESC_CMD_DUMMY;
4103 txdp->cmd_type_offset_bsz =
4104 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4107 if (txq->tx_tail >= txq->nb_tx_desc)
4109 /* Update the tx tail register */
4110 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4111 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4112 if ((txdp->cmd_type_offset_bsz &
4113 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4114 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4118 if (i >= ICE_FDIR_MAX_WAIT_US) {
4120 "Failed to program FDIR filter: time out to get DD on tx queue.");
4124 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4127 ret = ice_check_fdir_programming_status(rxq);
4135 "Failed to program FDIR filter: programming status reported.");