1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <ethdev_driver.h>
9 #include "rte_pmd_ice.h"
12 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
16 PKT_TX_OUTER_IP_CKSUM)
18 /* Offset of mbuf dynamic field for protocol extraction data */
19 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
21 /* Mask of mbuf dynamic flags for protocol extraction type */
22 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
30 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
32 volatile union ice_rx_flex_desc *rxdp;
33 struct ice_rx_queue *rxq = rx_queue;
37 rxdp = &rxq->rx_ring[desc];
38 /* watch for changes in status bit */
39 pmc->addr = &rxdp->wb.status_error0;
42 * we expect the DD bit to be set to 1 if this descriptor was already
45 pmc->val = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
46 pmc->mask = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
48 /* register is 16-bit */
49 pmc->size = sizeof(uint16_t);
56 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
58 static uint8_t rxdid_map[] = {
59 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
60 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
61 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
62 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
63 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
64 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
65 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
68 return xtr_type < RTE_DIM(rxdid_map) ?
69 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
73 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
75 volatile union ice_rx_flex_desc *rxdp)
77 volatile struct ice_32b_rx_flex_desc_comms *desc =
78 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
79 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
81 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
82 mb->ol_flags |= PKT_RX_RSS_HASH;
83 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
86 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
87 if (desc->flow_id != 0xFFFFFFFF) {
88 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
89 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
95 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
97 volatile union ice_rx_flex_desc *rxdp)
99 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
100 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
101 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
105 if (desc->flow_id != 0xFFFFFFFF) {
106 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
107 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
110 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
111 stat_err = rte_le_to_cpu_16(desc->status_error0);
112 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
113 mb->ol_flags |= PKT_RX_RSS_HASH;
114 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
120 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
122 volatile union ice_rx_flex_desc *rxdp)
124 volatile struct ice_32b_rx_flex_desc_comms *desc =
125 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
128 stat_err = rte_le_to_cpu_16(desc->status_error0);
129 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
130 mb->ol_flags |= PKT_RX_RSS_HASH;
131 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
134 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
135 if (desc->flow_id != 0xFFFFFFFF) {
136 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
137 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
140 if (rxq->xtr_ol_flag) {
141 uint32_t metadata = 0;
143 stat_err = rte_le_to_cpu_16(desc->status_error1);
145 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
146 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
148 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
150 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
153 mb->ol_flags |= rxq->xtr_ol_flag;
155 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
162 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
164 volatile union ice_rx_flex_desc *rxdp)
166 volatile struct ice_32b_rx_flex_desc_comms *desc =
167 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
170 stat_err = rte_le_to_cpu_16(desc->status_error0);
171 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
172 mb->ol_flags |= PKT_RX_RSS_HASH;
173 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
176 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
177 if (desc->flow_id != 0xFFFFFFFF) {
178 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
179 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
182 if (rxq->xtr_ol_flag) {
183 uint32_t metadata = 0;
185 if (desc->flex_ts.flex.aux0 != 0xFFFF)
186 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
187 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
188 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
191 mb->ol_flags |= rxq->xtr_ol_flag;
193 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
200 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
203 case ICE_RXDID_COMMS_AUX_VLAN:
204 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
205 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
208 case ICE_RXDID_COMMS_AUX_IPV4:
209 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
210 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
213 case ICE_RXDID_COMMS_AUX_IPV6:
214 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
215 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
218 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
219 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
220 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
223 case ICE_RXDID_COMMS_AUX_TCP:
224 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
225 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
228 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
229 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
230 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
233 case ICE_RXDID_COMMS_GENERIC:
234 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
237 case ICE_RXDID_COMMS_OVS:
238 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
242 /* update this according to the RXDID for PROTO_XTR_NONE */
243 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
247 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
248 rxq->xtr_ol_flag = 0;
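/* If the protocol extraction metadata dynfield was never registered (no
 * queue requested it), xtr_ol_flag stays 0 and the aux Rx handlers skip
 * writing the extraction metadata into the mbuf.
 */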
251 static enum ice_status
252 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
254 struct ice_vsi *vsi = rxq->vsi;
255 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
256 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
257 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
258 struct ice_rlan_ctx rx_ctx;
260 uint16_t buf_size, len;
261 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
262 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
265 /* Set buffer size as header split is disabled. */
266 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
267 RTE_PKTMBUF_HEADROOM);
269 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
270 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
271 rxq->max_pkt_len = RTE_MIN(len,
272 dev->data->dev_conf.rxmode.max_rx_pkt_len);
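/* Rough example, assuming a common mempool layout with a 2048-byte data
 * room left after RTE_PKTMBUF_HEADROOM: rx_buf_len is 2048 and
 * max_pkt_len becomes the smaller of the configured max_rx_pkt_len and
 * ICE_SUPPORT_CHAIN_NUM * 2048, the longest chain of Rx buffers a single
 * packet may span.
 */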
274 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
275 if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
276 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
277 PMD_DRV_LOG(ERR, "maximum packet length must "
278 "be larger than %u and smaller than %u, "
279 "as jumbo frame is enabled",
280 (uint32_t)ICE_ETH_MAX_LEN,
281 (uint32_t)ICE_FRAME_SIZE_MAX);
285 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
286 rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
287 PMD_DRV_LOG(ERR, "maximum packet length must be "
288 "larger than %u and smaller than %u, "
289 "as jumbo frame is disabled",
290 (uint32_t)RTE_ETHER_MIN_LEN,
291 (uint32_t)ICE_ETH_MAX_LEN);
296 memset(&rx_ctx, 0, sizeof(rx_ctx));
298 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
299 rx_ctx.qlen = rxq->nb_rx_desc;
300 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
301 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
302 rx_ctx.dtype = 0; /* No Header Split mode */
303 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
304 rx_ctx.dsize = 1; /* 32B descriptors */
306 rx_ctx.rxmax = rxq->max_pkt_len;
307 /* TPH: Transaction Layer Packet (TLP) processing hints */
308 rx_ctx.tphrdesc_ena = 1;
309 rx_ctx.tphwdesc_ena = 1;
310 rx_ctx.tphdata_ena = 1;
311 rx_ctx.tphhead_ena = 1;
312 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
313 * When the number of free descriptors goes below the lrxqthresh,
314 * an immediate interrupt is triggered.
316 rx_ctx.lrxqthresh = 2;
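/* lrxqthresh is in units of 64 descriptors, so a value of 2 means the
 * low-queue interrupt fires once fewer than 128 free descriptors remain.
 */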
317 /* Default to the 32-byte descriptor format; VLAN tag is extracted to L2TAG2 (1st) */
320 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
322 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
324 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
325 rxq->port_id, rxq->queue_id, rxdid);
327 if (!(pf->supported_rxdid & BIT(rxdid))) {
328 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
333 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
335 /* Enable Flexible Descriptors in the queue context which
336 * allows this driver to select a specific receive descriptor format
338 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
339 QRXFLXP_CNTXT_RXDID_IDX_M;
341 /* increasing context priority to pick up profile ID;
342 * default is 0x01; setting to 0x03 to ensure profile
343 * is programmed if the previous context is of the same priority
345 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
346 QRXFLXP_CNTXT_RXDID_PRIO_M;
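/* The resulting value carries both the flexible descriptor ID selected
 * above and the raised context priority, and is written to the queue's
 * QRXFLXP_CNTXT register below.
 */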
348 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
350 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
352 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
356 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
358 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
363 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
364 RTE_PKTMBUF_HEADROOM);
366 /* Check if scattered RX needs to be used. */
367 if (rxq->max_pkt_len > buf_size)
368 dev->data->scattered_rx = 1;
370 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
372 /* Init the Rx tail register */
373 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
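/* Tail is initialized to nb_rx_desc - 1 rather than nb_rx_desc so it never
 * equals the head; as noted in the Rx burst paths, tail == head would look
 * like a "full" ring from the hardware's point of view.
 */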
378 /* Allocate mbufs for all descriptors in rx queue */
380 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
382 struct ice_rx_entry *rxe = rxq->sw_ring;
386 for (i = 0; i < rxq->nb_rx_desc; i++) {
387 volatile union ice_rx_flex_desc *rxd;
388 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
390 if (unlikely(!mbuf)) {
391 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
395 rte_mbuf_refcnt_set(mbuf, 1);
397 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
399 mbuf->port = rxq->port_id;
402 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
404 rxd = &rxq->rx_ring[i];
405 rxd->read.pkt_addr = dma_addr;
406 rxd->read.hdr_addr = 0;
407 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
417 /* Free all mbufs for descriptors in rx queue */
419 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
423 if (!rxq || !rxq->sw_ring) {
424 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
428 for (i = 0; i < rxq->nb_rx_desc; i++) {
429 if (rxq->sw_ring[i].mbuf) {
430 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
431 rxq->sw_ring[i].mbuf = NULL;
434 if (rxq->rx_nb_avail == 0)
436 for (i = 0; i < rxq->rx_nb_avail; i++)
437 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
439 rxq->rx_nb_avail = 0;
442 /* turn on or off rx queue
443 * @q_idx: queue index in pf scope
444 * @on: turn on or off the queue
447 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
452 /* QRX_CTRL = QRX_ENA */
453 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
456 if (reg & QRX_CTRL_QENA_STAT_M)
457 return 0; /* Already on, skip */
458 reg |= QRX_CTRL_QENA_REQ_M;
460 if (!(reg & QRX_CTRL_QENA_STAT_M))
461 return 0; /* Already off, skip */
462 reg &= ~QRX_CTRL_QENA_REQ_M;
465 /* Write the register */
466 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
467 /* Check the result. It is said that QENA_STAT
468 * follows the QENA_REQ within 10 us at most.
469 * TODO: need to change the wait counter later
471 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
472 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
473 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
475 if ((reg & QRX_CTRL_QENA_REQ_M) &&
476 (reg & QRX_CTRL_QENA_STAT_M))
479 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
480 !(reg & QRX_CTRL_QENA_STAT_M))
485 /* Check if it is timeout */
486 if (j >= ICE_CHK_Q_ENA_COUNT) {
487 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
488 (on ? "enable" : "disable"), q_idx);
496 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
500 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
501 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
502 "rxq->rx_free_thresh=%d, "
503 "ICE_RX_MAX_BURST=%d",
504 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
506 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
507 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
508 "rxq->rx_free_thresh=%d, "
509 "rxq->nb_rx_desc=%d",
510 rxq->rx_free_thresh, rxq->nb_rx_desc);
512 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
513 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
514 "rxq->nb_rx_desc=%d, "
515 "rxq->rx_free_thresh=%d",
516 rxq->nb_rx_desc, rxq->rx_free_thresh);
523 /* reset fields in ice_rx_queue back to default */
525 ice_reset_rx_queue(struct ice_rx_queue *rxq)
531 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
535 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
537 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
538 ((volatile char *)rxq->rx_ring)[i] = 0;
540 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
541 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
542 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
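/* The ICE_RX_MAX_BURST entries past the real ring point at fake_mbuf so
 * the look-ahead scan in the bulk-alloc Rx path can read beyond
 * nb_rx_desc without dereferencing a NULL mbuf pointer.
 */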
544 rxq->rx_nb_avail = 0;
545 rxq->rx_next_avail = 0;
546 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
550 rxq->pkt_first_seg = NULL;
551 rxq->pkt_last_seg = NULL;
553 rxq->rxrearm_start = 0;
558 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
560 struct ice_rx_queue *rxq;
562 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
564 PMD_INIT_FUNC_TRACE();
566 if (rx_queue_id >= dev->data->nb_rx_queues) {
567 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
568 rx_queue_id, dev->data->nb_rx_queues);
572 rxq = dev->data->rx_queues[rx_queue_id];
573 if (!rxq || !rxq->q_set) {
574 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
579 err = ice_program_hw_rx_queue(rxq);
581 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
586 err = ice_alloc_rx_queue_mbufs(rxq);
588 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
592 /* Init the RX tail register. */
593 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
595 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
597 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
600 rxq->rx_rel_mbufs(rxq);
601 ice_reset_rx_queue(rxq);
605 dev->data->rx_queue_state[rx_queue_id] =
606 RTE_ETH_QUEUE_STATE_STARTED;
612 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
614 struct ice_rx_queue *rxq;
616 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
618 if (rx_queue_id < dev->data->nb_rx_queues) {
619 rxq = dev->data->rx_queues[rx_queue_id];
621 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
623 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
627 rxq->rx_rel_mbufs(rxq);
628 ice_reset_rx_queue(rxq);
629 dev->data->rx_queue_state[rx_queue_id] =
630 RTE_ETH_QUEUE_STATE_STOPPED;
637 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
639 struct ice_tx_queue *txq;
643 struct ice_aqc_add_tx_qgrp *txq_elem;
644 struct ice_tlan_ctx tx_ctx;
647 PMD_INIT_FUNC_TRACE();
649 if (tx_queue_id >= dev->data->nb_tx_queues) {
650 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
651 tx_queue_id, dev->data->nb_tx_queues);
655 txq = dev->data->tx_queues[tx_queue_id];
656 if (!txq || !txq->q_set) {
657 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
662 buf_len = ice_struct_size(txq_elem, txqs, 1);
663 txq_elem = ice_malloc(hw, buf_len);
668 hw = ICE_VSI_TO_HW(vsi);
670 memset(&tx_ctx, 0, sizeof(tx_ctx));
671 txq_elem->num_txqs = 1;
672 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
674 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
675 tx_ctx.qlen = txq->nb_tx_desc;
676 tx_ctx.pf_num = hw->pf_id;
677 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
678 tx_ctx.src_vsi = vsi->vsi_id;
679 tx_ctx.port_num = hw->port_info->lport;
680 tx_ctx.tso_ena = 1; /* tso enable */
681 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
682 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
684 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
687 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
689 /* Init the Tx tail register */
690 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
692 /* Fix me, we assume TC always 0 here */
693 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
694 txq_elem, buf_len, NULL);
696 PMD_DRV_LOG(ERR, "Failed to add lan txq");
700 /* store the schedule node id */
701 txq->q_teid = txq_elem->txqs[0].q_teid;
703 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
709 static enum ice_status
710 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
712 struct ice_vsi *vsi = rxq->vsi;
713 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
714 uint32_t rxdid = ICE_RXDID_LEGACY_1;
715 struct ice_rlan_ctx rx_ctx;
720 rxq->rx_buf_len = 1024;
722 memset(&rx_ctx, 0, sizeof(rx_ctx));
724 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
725 rx_ctx.qlen = rxq->nb_rx_desc;
726 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
727 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
728 rx_ctx.dtype = 0; /* No Header Split mode */
729 rx_ctx.dsize = 1; /* 32B descriptors */
730 rx_ctx.rxmax = ICE_ETH_MAX_LEN;
731 /* TPH: Transaction Layer Packet (TLP) processing hints */
732 rx_ctx.tphrdesc_ena = 1;
733 rx_ctx.tphwdesc_ena = 1;
734 rx_ctx.tphdata_ena = 1;
735 rx_ctx.tphhead_ena = 1;
736 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
737 * When the number of free descriptors goes below the lrxqthresh,
738 * an immediate interrupt is triggered.
740 rx_ctx.lrxqthresh = 2;
741 /* Default to the 32-byte descriptor format; VLAN tag is extracted to L2TAG2 (1st) */
744 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
746 /* Enable Flexible Descriptors in the queue context which
747 * allows this driver to select a specific receive descriptor format
749 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
750 QRXFLXP_CNTXT_RXDID_IDX_M;
752 /* increasing context priority to pick up profile ID;
753 * default is 0x01; setting to 0x03 to ensure profile
754 * is programmed if the previous context is of the same priority
756 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
757 QRXFLXP_CNTXT_RXDID_PRIO_M;
759 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
761 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
763 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
767 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
769 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
774 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
776 /* Init the Rx tail register */
777 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
783 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
785 struct ice_rx_queue *rxq;
787 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
788 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
790 PMD_INIT_FUNC_TRACE();
793 if (!rxq || !rxq->q_set) {
794 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
799 err = ice_fdir_program_hw_rx_queue(rxq);
801 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
806 /* Init the RX tail register. */
807 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
809 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
811 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
814 ice_reset_rx_queue(rxq);
822 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
824 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
825 struct ice_tx_queue *txq;
829 struct ice_aqc_add_tx_qgrp *txq_elem;
830 struct ice_tlan_ctx tx_ctx;
833 PMD_INIT_FUNC_TRACE();
836 if (!txq || !txq->q_set) {
837 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
842 buf_len = ice_struct_size(txq_elem, txqs, 1);
843 txq_elem = ice_malloc(hw, buf_len);
848 hw = ICE_VSI_TO_HW(vsi);
850 memset(&tx_ctx, 0, sizeof(tx_ctx));
851 txq_elem->num_txqs = 1;
852 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
854 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
855 tx_ctx.qlen = txq->nb_tx_desc;
856 tx_ctx.pf_num = hw->pf_id;
857 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
858 tx_ctx.src_vsi = vsi->vsi_id;
859 tx_ctx.port_num = hw->port_info->lport;
860 tx_ctx.tso_ena = 1; /* tso enable */
861 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
862 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
864 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
867 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
869 /* Init the Tx tail register */
870 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
872 /* Fix me, we assume TC always 0 here */
873 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
874 txq_elem, buf_len, NULL);
876 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
880 /* store the schedule node id */
881 txq->q_teid = txq_elem->txqs[0].q_teid;
887 /* Free all mbufs for descriptors in tx queue */
889 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
893 if (!txq || !txq->sw_ring) {
894 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
898 for (i = 0; i < txq->nb_tx_desc; i++) {
899 if (txq->sw_ring[i].mbuf) {
900 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
901 txq->sw_ring[i].mbuf = NULL;
907 ice_reset_tx_queue(struct ice_tx_queue *txq)
909 struct ice_tx_entry *txe;
910 uint16_t i, prev, size;
913 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
918 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
919 for (i = 0; i < size; i++)
920 ((volatile char *)txq->tx_ring)[i] = 0;
922 prev = (uint16_t)(txq->nb_tx_desc - 1);
923 for (i = 0; i < txq->nb_tx_desc; i++) {
924 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
926 txd->cmd_type_offset_bsz =
927 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
930 txe[prev].next_id = i;
934 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
935 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
940 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
941 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
945 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
947 struct ice_tx_queue *txq;
948 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
949 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
950 struct ice_vsi *vsi = pf->main_vsi;
951 enum ice_status status;
954 uint16_t q_handle = tx_queue_id;
956 if (tx_queue_id >= dev->data->nb_tx_queues) {
957 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
958 tx_queue_id, dev->data->nb_tx_queues);
962 txq = dev->data->tx_queues[tx_queue_id];
964 PMD_DRV_LOG(ERR, "TX queue %u is not available",
969 q_ids[0] = txq->reg_idx;
970 q_teids[0] = txq->q_teid;
972 /* Fix me, we assume TC always 0 here */
973 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
974 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
975 if (status != ICE_SUCCESS) {
976 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
980 txq->tx_rel_mbufs(txq);
981 ice_reset_tx_queue(txq);
982 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
988 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
990 struct ice_rx_queue *rxq;
992 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
993 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
997 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
999 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1003 rxq->rx_rel_mbufs(rxq);
1009 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1011 struct ice_tx_queue *txq;
1012 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1013 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1014 struct ice_vsi *vsi = pf->main_vsi;
1015 enum ice_status status;
1017 uint32_t q_teids[1];
1018 uint16_t q_handle = tx_queue_id;
1022 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1028 q_ids[0] = txq->reg_idx;
1029 q_teids[0] = txq->q_teid;
1031 /* Fix me, we assume TC always 0 here */
1032 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1033 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1034 if (status != ICE_SUCCESS) {
1035 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1039 txq->tx_rel_mbufs(txq);
1045 ice_rx_queue_setup(struct rte_eth_dev *dev,
1048 unsigned int socket_id,
1049 const struct rte_eth_rxconf *rx_conf,
1050 struct rte_mempool *mp)
1052 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1053 struct ice_adapter *ad =
1054 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1055 struct ice_vsi *vsi = pf->main_vsi;
1056 struct ice_rx_queue *rxq;
1057 const struct rte_memzone *rz;
1060 int use_def_burst_func = 1;
1062 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1063 nb_desc > ICE_MAX_RING_DESC ||
1064 nb_desc < ICE_MIN_RING_DESC) {
1065 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1066 "invalid", nb_desc);
1070 /* Free memory if needed */
1071 if (dev->data->rx_queues[queue_idx]) {
1072 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1073 dev->data->rx_queues[queue_idx] = NULL;
1076 /* Allocate the rx queue data structure */
1077 rxq = rte_zmalloc_socket(NULL,
1078 sizeof(struct ice_rx_queue),
1079 RTE_CACHE_LINE_SIZE,
1082 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1083 "rx queue data structure");
1087 rxq->nb_rx_desc = nb_desc;
1088 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1089 rxq->queue_id = queue_idx;
1091 rxq->reg_idx = vsi->base_queue + queue_idx;
1092 rxq->port_id = dev->data->port_id;
1093 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1094 rxq->crc_len = RTE_ETHER_CRC_LEN;
1098 rxq->drop_en = rx_conf->rx_drop_en;
1100 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1101 rxq->proto_xtr = pf->proto_xtr != NULL ?
1102 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1104 /* Allocate the maximum number of RX ring hardware descriptors. */
1105 len = ICE_MAX_RING_DESC;
1108 * Allocating a little more memory because vectorized/bulk_alloc Rx
1109 * functions don't check boundaries each time.
1111 len += ICE_RX_MAX_BURST;
1113 /* Allocate the maximum number of RX ring hardware descriptors. */
1114 ring_size = sizeof(union ice_rx_flex_desc) * len;
1115 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1116 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1117 ring_size, ICE_RING_BASE_ALIGN,
1120 ice_rx_queue_release(rxq);
1121 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1125 /* Zero all the descriptors in the ring. */
1126 memset(rz->addr, 0, ring_size);
1128 rxq->rx_ring_dma = rz->iova;
1129 rxq->rx_ring = rz->addr;
1131 /* always reserve more for bulk alloc */
1132 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1134 /* Allocate the software ring. */
1135 rxq->sw_ring = rte_zmalloc_socket(NULL,
1136 sizeof(struct ice_rx_entry) * len,
1137 RTE_CACHE_LINE_SIZE,
1139 if (!rxq->sw_ring) {
1140 ice_rx_queue_release(rxq);
1141 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1145 ice_reset_rx_queue(rxq);
1147 dev->data->rx_queues[queue_idx] = rxq;
1148 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1150 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
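/* The preconditions checked above require rx_free_thresh >= ICE_RX_MAX_BURST,
 * rx_free_thresh < nb_rx_desc, and nb_rx_desc evenly divisible by
 * rx_free_thresh; e.g. nb_desc = 1024 with rx_free_thresh = 32 qualifies
 * (assuming the usual ICE_RX_MAX_BURST of 32).
 */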
1152 if (!use_def_burst_func) {
1153 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1154 "satisfied. Rx Burst Bulk Alloc function will be "
1155 "used on port=%d, queue=%d.",
1156 rxq->port_id, rxq->queue_id);
1158 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1159 "not satisfied, Scattered Rx is requested "
1160 "on port=%d, queue=%d.",
1161 rxq->port_id, rxq->queue_id);
1162 ad->rx_bulk_alloc_allowed = false;
1169 ice_rx_queue_release(void *rxq)
1171 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1174 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1179 rte_free(q->sw_ring);
1184 ice_tx_queue_setup(struct rte_eth_dev *dev,
1187 unsigned int socket_id,
1188 const struct rte_eth_txconf *tx_conf)
1190 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1191 struct ice_vsi *vsi = pf->main_vsi;
1192 struct ice_tx_queue *txq;
1193 const struct rte_memzone *tz;
1195 uint16_t tx_rs_thresh, tx_free_thresh;
1198 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1200 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1201 nb_desc > ICE_MAX_RING_DESC ||
1202 nb_desc < ICE_MIN_RING_DESC) {
1203 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1204 "invalid", nb_desc);
1209 * The following two parameters control the setting of the RS bit on
1210 * transmit descriptors. TX descriptors will have their RS bit set
1211 * after txq->tx_rs_thresh descriptors have been used. The TX
1212 * descriptor ring will be cleaned after txq->tx_free_thresh
1213 * descriptors are used or if the number of descriptors required to
1214 * transmit a packet is greater than the number of free TX descriptors.
1216 * The following constraints must be satisfied:
1217 * - tx_rs_thresh must be greater than 0.
1218 * - tx_rs_thresh must be less than the size of the ring minus 2.
1219 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1220 * - tx_rs_thresh must be a divisor of the ring size.
1221 * - tx_free_thresh must be greater than 0.
1222 * - tx_free_thresh must be less than the size of the ring minus 3.
1223 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1225 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1226 * race condition, hence the maximum threshold constraints. When set
1227 * to zero use default values.
1229 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1230 tx_conf->tx_free_thresh :
1231 ICE_DEFAULT_TX_FREE_THRESH);
1232 /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1234 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1235 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1236 if (tx_conf->tx_rs_thresh)
1237 tx_rs_thresh = tx_conf->tx_rs_thresh;
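/* Worked example (assuming the driver defaults of 32 for both thresholds):
 * with nb_desc = 1024, tx_rs_thresh = tx_free_thresh = 32 satisfies every
 * constraint listed above: 32 > 0, 32 < 1022, 32 <= 32, 1024 % 32 == 0,
 * 32 < 1021, and 32 + 32 <= 1024.
 */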
1238 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1239 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1240 "exceed nb_desc. (tx_rs_thresh=%u "
1241 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1242 (unsigned int)tx_rs_thresh,
1243 (unsigned int)tx_free_thresh,
1244 (unsigned int)nb_desc,
1245 (int)dev->data->port_id,
1249 if (tx_rs_thresh >= (nb_desc - 2)) {
1250 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1251 "number of TX descriptors minus 2. "
1252 "(tx_rs_thresh=%u port=%d queue=%d)",
1253 (unsigned int)tx_rs_thresh,
1254 (int)dev->data->port_id,
1258 if (tx_free_thresh >= (nb_desc - 3)) {
1259 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1261 "number of TX descriptors minus 3. "
1262 "(tx_free_thresh=%u port=%d queue=%d)",
1263 (unsigned int)tx_free_thresh,
1264 (int)dev->data->port_id,
1268 if (tx_rs_thresh > tx_free_thresh) {
1269 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1270 "equal to tx_free_thresh. (tx_free_thresh=%u"
1271 " tx_rs_thresh=%u port=%d queue=%d)",
1272 (unsigned int)tx_free_thresh,
1273 (unsigned int)tx_rs_thresh,
1274 (int)dev->data->port_id,
1278 if ((nb_desc % tx_rs_thresh) != 0) {
1279 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1280 "number of TX descriptors. (tx_rs_thresh=%u"
1281 " port=%d queue=%d)",
1282 (unsigned int)tx_rs_thresh,
1283 (int)dev->data->port_id,
1287 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1288 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1289 "tx_rs_thresh is greater than 1. "
1290 "(tx_rs_thresh=%u port=%d queue=%d)",
1291 (unsigned int)tx_rs_thresh,
1292 (int)dev->data->port_id,
1297 /* Free memory if needed. */
1298 if (dev->data->tx_queues[queue_idx]) {
1299 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1300 dev->data->tx_queues[queue_idx] = NULL;
1303 /* Allocate the TX queue data structure. */
1304 txq = rte_zmalloc_socket(NULL,
1305 sizeof(struct ice_tx_queue),
1306 RTE_CACHE_LINE_SIZE,
1309 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1310 "tx queue structure");
1314 /* Allocate TX hardware ring descriptors. */
1315 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1316 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1317 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1318 ring_size, ICE_RING_BASE_ALIGN,
1321 ice_tx_queue_release(txq);
1322 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1326 txq->nb_tx_desc = nb_desc;
1327 txq->tx_rs_thresh = tx_rs_thresh;
1328 txq->tx_free_thresh = tx_free_thresh;
1329 txq->pthresh = tx_conf->tx_thresh.pthresh;
1330 txq->hthresh = tx_conf->tx_thresh.hthresh;
1331 txq->wthresh = tx_conf->tx_thresh.wthresh;
1332 txq->queue_id = queue_idx;
1334 txq->reg_idx = vsi->base_queue + queue_idx;
1335 txq->port_id = dev->data->port_id;
1336 txq->offloads = offloads;
1338 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1340 txq->tx_ring_dma = tz->iova;
1341 txq->tx_ring = tz->addr;
1343 /* Allocate software ring */
1345 rte_zmalloc_socket(NULL,
1346 sizeof(struct ice_tx_entry) * nb_desc,
1347 RTE_CACHE_LINE_SIZE,
1349 if (!txq->sw_ring) {
1350 ice_tx_queue_release(txq);
1351 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1355 ice_reset_tx_queue(txq);
1357 dev->data->tx_queues[queue_idx] = txq;
1358 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1359 ice_set_tx_function_flag(dev, txq);
1365 ice_tx_queue_release(void *txq)
1367 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1370 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1375 rte_free(q->sw_ring);
1380 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1381 struct rte_eth_rxq_info *qinfo)
1383 struct ice_rx_queue *rxq;
1385 rxq = dev->data->rx_queues[queue_id];
1387 qinfo->mp = rxq->mp;
1388 qinfo->scattered_rx = dev->data->scattered_rx;
1389 qinfo->nb_desc = rxq->nb_rx_desc;
1391 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1392 qinfo->conf.rx_drop_en = rxq->drop_en;
1393 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1397 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1398 struct rte_eth_txq_info *qinfo)
1400 struct ice_tx_queue *txq;
1402 txq = dev->data->tx_queues[queue_id];
1404 qinfo->nb_desc = txq->nb_tx_desc;
1406 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1407 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1408 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1410 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1411 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1412 qinfo->conf.offloads = txq->offloads;
1413 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1417 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1419 #define ICE_RXQ_SCAN_INTERVAL 4
1420 volatile union ice_rx_flex_desc *rxdp;
1421 struct ice_rx_queue *rxq;
1424 rxq = dev->data->rx_queues[rx_queue_id];
1425 rxdp = &rxq->rx_ring[rxq->rx_tail];
1426 while ((desc < rxq->nb_rx_desc) &&
1427 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1428 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1430 * Check the DD bit of one Rx descriptor in every group of 4,
1431 * to avoid checking too frequently and degrading performance
1434 desc += ICE_RXQ_SCAN_INTERVAL;
1435 rxdp += ICE_RXQ_SCAN_INTERVAL;
1436 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1437 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1438 desc - rxq->nb_rx_desc]);
1444 #define ICE_RX_FLEX_ERR0_BITS \
1445 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1446 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1447 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1448 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1449 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1450 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1452 /* Rx L3/L4 checksum */
1453 static inline uint64_t
1454 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1458 /* check if HW has decoded the packet and checksum */
1459 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1462 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1463 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1467 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1468 flags |= PKT_RX_IP_CKSUM_BAD;
1470 flags |= PKT_RX_IP_CKSUM_GOOD;
1472 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1473 flags |= PKT_RX_L4_CKSUM_BAD;
1475 flags |= PKT_RX_L4_CKSUM_GOOD;
1477 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1478 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1480 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1481 flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1483 flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1489 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1491 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1492 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1493 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1495 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1496 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1497 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1502 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1503 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1504 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1505 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1506 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1507 mb->vlan_tci_outer = mb->vlan_tci;
1508 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1509 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1510 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1511 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1513 mb->vlan_tci_outer = 0;
1516 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1517 mb->vlan_tci, mb->vlan_tci_outer);
1520 #define ICE_LOOK_AHEAD 8
1521 #if (ICE_LOOK_AHEAD != 8)
1522 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1525 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1527 volatile union ice_rx_flex_desc *rxdp;
1528 struct ice_rx_entry *rxep;
1529 struct rte_mbuf *mb;
1532 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1533 int32_t i, j, nb_rx = 0;
1534 uint64_t pkt_flags = 0;
1535 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1537 rxdp = &rxq->rx_ring[rxq->rx_tail];
1538 rxep = &rxq->sw_ring[rxq->rx_tail];
1540 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1542 /* Make sure there is at least 1 packet to receive */
1543 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1547 * Scan LOOK_AHEAD descriptors at a time to determine which
1548 * descriptors reference packets that are ready to be received.
1550 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1551 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1552 /* Read desc statuses backwards to avoid race condition */
1553 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1554 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1558 /* Compute how many status bits were set */
1559 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1560 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1564 /* Translate descriptor info to mbuf parameters */
1565 for (j = 0; j < nb_dd; j++) {
1567 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1568 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1569 mb->data_len = pkt_len;
1570 mb->pkt_len = pkt_len;
1572 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1573 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1574 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1575 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1576 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1577 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1579 mb->ol_flags |= pkt_flags;
1582 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1583 rxq->rx_stage[i + j] = rxep[j].mbuf;
1585 if (nb_dd != ICE_LOOK_AHEAD)
1589 /* Clear software ring entries */
1590 for (i = 0; i < nb_rx; i++)
1591 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1593 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1594 "port_id=%u, queue_id=%u, nb_rx=%d",
1595 rxq->port_id, rxq->queue_id, nb_rx);
1600 static inline uint16_t
1601 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1602 struct rte_mbuf **rx_pkts,
1606 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1608 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1610 for (i = 0; i < nb_pkts; i++)
1611 rx_pkts[i] = stage[i];
1613 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1614 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1620 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1622 volatile union ice_rx_flex_desc *rxdp;
1623 struct ice_rx_entry *rxep;
1624 struct rte_mbuf *mb;
1625 uint16_t alloc_idx, i;
1629 /* Allocate buffers in bulk */
1630 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1631 (rxq->rx_free_thresh - 1));
1632 rxep = &rxq->sw_ring[alloc_idx];
1633 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1634 rxq->rx_free_thresh);
1635 if (unlikely(diag != 0)) {
1636 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1640 rxdp = &rxq->rx_ring[alloc_idx];
1641 for (i = 0; i < rxq->rx_free_thresh; i++) {
1642 if (likely(i < (rxq->rx_free_thresh - 1)))
1643 /* Prefetch next mbuf */
1644 rte_prefetch0(rxep[i + 1].mbuf);
1647 rte_mbuf_refcnt_set(mb, 1);
1649 mb->data_off = RTE_PKTMBUF_HEADROOM;
1651 mb->port = rxq->port_id;
1652 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1653 rxdp[i].read.hdr_addr = 0;
1654 rxdp[i].read.pkt_addr = dma_addr;
1657 /* Update Rx tail register */
1658 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1660 rxq->rx_free_trigger =
1661 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1662 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1663 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
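/* Example: with 1024 descriptors and rx_free_thresh = 32 the trigger
 * advances 31, 63, 95, ... 1023 and then wraps back to 31, so the tail
 * written above always points at the last descriptor just refilled.
 */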
1668 static inline uint16_t
1669 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1671 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1673 struct rte_eth_dev *dev;
1678 if (rxq->rx_nb_avail)
1679 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1681 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1682 rxq->rx_next_avail = 0;
1683 rxq->rx_nb_avail = nb_rx;
1684 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1686 if (rxq->rx_tail > rxq->rx_free_trigger) {
1687 if (ice_rx_alloc_bufs(rxq) != 0) {
1690 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1691 dev->data->rx_mbuf_alloc_failed +=
1692 rxq->rx_free_thresh;
1693 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1694 "port_id=%u, queue_id=%u",
1695 rxq->port_id, rxq->queue_id);
1696 rxq->rx_nb_avail = 0;
1697 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1698 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1699 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1705 if (rxq->rx_tail >= rxq->nb_rx_desc)
1708 if (rxq->rx_nb_avail)
1709 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1715 ice_recv_pkts_bulk_alloc(void *rx_queue,
1716 struct rte_mbuf **rx_pkts,
1723 if (unlikely(nb_pkts == 0))
1726 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1727 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1730 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1731 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1732 nb_rx = (uint16_t)(nb_rx + count);
1733 nb_pkts = (uint16_t)(nb_pkts - count);
1742 ice_recv_scattered_pkts(void *rx_queue,
1743 struct rte_mbuf **rx_pkts,
1746 struct ice_rx_queue *rxq = rx_queue;
1747 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1748 volatile union ice_rx_flex_desc *rxdp;
1749 union ice_rx_flex_desc rxd;
1750 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1751 struct ice_rx_entry *rxe;
1752 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1753 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1754 struct rte_mbuf *nmb; /* newly allocated mbuf */
1755 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1756 uint16_t rx_id = rxq->rx_tail;
1758 uint16_t nb_hold = 0;
1759 uint16_t rx_packet_len;
1760 uint16_t rx_stat_err0;
1763 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1764 struct rte_eth_dev *dev;
1766 while (nb_rx < nb_pkts) {
1767 rxdp = &rx_ring[rx_id];
1768 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1770 /* Check the DD bit first */
1771 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1775 nmb = rte_mbuf_raw_alloc(rxq->mp);
1776 if (unlikely(!nmb)) {
1777 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1778 dev->data->rx_mbuf_alloc_failed++;
1781 rxd = *rxdp; /* copy descriptor in ring to temp variable */
1784 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1786 if (unlikely(rx_id == rxq->nb_rx_desc))
1789 /* Prefetch next mbuf */
1790 rte_prefetch0(sw_ring[rx_id].mbuf);
1793 * When the next RX descriptor is on a cache line boundary,
1794 * prefetch the next 4 RX descriptors and next 8 pointers
1797 if ((rx_id & 0x3) == 0) {
1798 rte_prefetch0(&rx_ring[rx_id]);
1799 rte_prefetch0(&sw_ring[rx_id]);
1805 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1807 /* Set data buffer address and data length of the mbuf */
1808 rxdp->read.hdr_addr = 0;
1809 rxdp->read.pkt_addr = dma_addr;
1810 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1811 ICE_RX_FLX_DESC_PKT_LEN_M;
1812 rxm->data_len = rx_packet_len;
1813 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1816 * If this is the first buffer of the received packet, set the
1817 * pointer to the first mbuf of the packet and initialize its
1818 * context. Otherwise, update the total length and the number
1819 * of segments of the current scattered packet, and update the
1820 * pointer to the last mbuf of the current packet.
1824 first_seg->nb_segs = 1;
1825 first_seg->pkt_len = rx_packet_len;
1827 first_seg->pkt_len =
1828 (uint16_t)(first_seg->pkt_len +
1830 first_seg->nb_segs++;
1831 last_seg->next = rxm;
1835 * If this is not the last buffer of the received packet,
1836 * update the pointer to the last mbuf of the current scattered
1837 * packet and continue to parse the RX ring.
1839 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1845 * This is the last buffer of the received packet. If the CRC
1846 * is not stripped by the hardware:
1847 * - Subtract the CRC length from the total packet length.
1848 * - If the last buffer only contains the whole CRC or a part
1849 * of it, free the mbuf associated to the last buffer. If part
1850 * of the CRC is also contained in the previous mbuf, subtract
1851 * the length of that CRC part from the data length of the
1855 if (unlikely(rxq->crc_len > 0)) {
1856 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1857 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1858 rte_pktmbuf_free_seg(rxm);
1859 first_seg->nb_segs--;
1860 last_seg->data_len =
1861 (uint16_t)(last_seg->data_len -
1862 (RTE_ETHER_CRC_LEN - rx_packet_len));
1863 last_seg->next = NULL;
1865 rxm->data_len = (uint16_t)(rx_packet_len -
1869 first_seg->port = rxq->port_id;
1870 first_seg->ol_flags = 0;
1871 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1872 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1873 ice_rxd_to_vlan_tci(first_seg, &rxd);
1874 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1875 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1876 first_seg->ol_flags |= pkt_flags;
1877 /* Prefetch data of first segment, if configured to do so. */
1878 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1879 first_seg->data_off));
1880 rx_pkts[nb_rx++] = first_seg;
1884 /* Record index of the next RX descriptor to probe. */
1885 rxq->rx_tail = rx_id;
1886 rxq->pkt_first_seg = first_seg;
1887 rxq->pkt_last_seg = last_seg;
1890 * If the number of free RX descriptors is greater than the RX free
1891 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1892 * register. Update the RDT with the value of the last processed RX
1893 * descriptor minus 1, to guarantee that the RDT register is never
1894 * equal to the RDH register, which creates a "full" ring situation
1895 * from the hardware point of view.
1897 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1898 if (nb_hold > rxq->rx_free_thresh) {
1899 rx_id = (uint16_t)(rx_id == 0 ?
1900 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1901 /* write TAIL register */
1902 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1905 rxq->nb_rx_hold = nb_hold;
1907 /* return received packet in the burst */
1912 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1914 struct ice_adapter *ad =
1915 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1916 const uint32_t *ptypes;
1918 static const uint32_t ptypes_os[] = {
1919 /* refers to ice_get_default_pkt_type() */
1921 RTE_PTYPE_L2_ETHER_TIMESYNC,
1922 RTE_PTYPE_L2_ETHER_LLDP,
1923 RTE_PTYPE_L2_ETHER_ARP,
1924 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1925 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1928 RTE_PTYPE_L4_NONFRAG,
1932 RTE_PTYPE_TUNNEL_GRENAT,
1933 RTE_PTYPE_TUNNEL_IP,
1934 RTE_PTYPE_INNER_L2_ETHER,
1935 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1936 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1937 RTE_PTYPE_INNER_L4_FRAG,
1938 RTE_PTYPE_INNER_L4_ICMP,
1939 RTE_PTYPE_INNER_L4_NONFRAG,
1940 RTE_PTYPE_INNER_L4_SCTP,
1941 RTE_PTYPE_INNER_L4_TCP,
1942 RTE_PTYPE_INNER_L4_UDP,
1946 static const uint32_t ptypes_comms[] = {
1947 /* refers to ice_get_default_pkt_type() */
1949 RTE_PTYPE_L2_ETHER_TIMESYNC,
1950 RTE_PTYPE_L2_ETHER_LLDP,
1951 RTE_PTYPE_L2_ETHER_ARP,
1952 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1953 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1956 RTE_PTYPE_L4_NONFRAG,
1960 RTE_PTYPE_TUNNEL_GRENAT,
1961 RTE_PTYPE_TUNNEL_IP,
1962 RTE_PTYPE_INNER_L2_ETHER,
1963 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1964 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1965 RTE_PTYPE_INNER_L4_FRAG,
1966 RTE_PTYPE_INNER_L4_ICMP,
1967 RTE_PTYPE_INNER_L4_NONFRAG,
1968 RTE_PTYPE_INNER_L4_SCTP,
1969 RTE_PTYPE_INNER_L4_TCP,
1970 RTE_PTYPE_INNER_L4_UDP,
1971 RTE_PTYPE_TUNNEL_GTPC,
1972 RTE_PTYPE_TUNNEL_GTPU,
1973 RTE_PTYPE_L2_ETHER_PPPOE,
1977 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1978 ptypes = ptypes_comms;
1982 if (dev->rx_pkt_burst == ice_recv_pkts ||
1983 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1984 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1988 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1989 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1990 #ifdef CC_AVX512_SUPPORT
1991 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
1992 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
1994 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1995 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
2003 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2005 volatile union ice_rx_flex_desc *rxdp;
2006 struct ice_rx_queue *rxq = rx_queue;
2009 if (unlikely(offset >= rxq->nb_rx_desc))
2012 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2013 return RTE_ETH_RX_DESC_UNAVAIL;
2015 desc = rxq->rx_tail + offset;
2016 if (desc >= rxq->nb_rx_desc)
2017 desc -= rxq->nb_rx_desc;
2019 rxdp = &rxq->rx_ring[desc];
2020 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2021 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2022 return RTE_ETH_RX_DESC_DONE;
2024 return RTE_ETH_RX_DESC_AVAIL;
2028 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2030 struct ice_tx_queue *txq = tx_queue;
2031 volatile uint64_t *status;
2032 uint64_t mask, expect;
2035 if (unlikely(offset >= txq->nb_tx_desc))
2038 desc = txq->tx_tail + offset;
2039 /* go to next desc that has the RS bit */
2040 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2042 if (desc >= txq->nb_tx_desc) {
2043 desc -= txq->nb_tx_desc;
2044 if (desc >= txq->nb_tx_desc)
2045 desc -= txq->nb_tx_desc;
2048 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2049 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2050 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2051 ICE_TXD_QW1_DTYPE_S);
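/* A descriptor is reported done once hardware has written back the
 * DESC_DONE dtype for the RS-marked descriptor computed above; anything
 * else means the slot is still owned by hardware.
 */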
2052 if ((*status & mask) == expect)
2053 return RTE_ETH_TX_DESC_DONE;
2055 return RTE_ETH_TX_DESC_FULL;
2059 ice_free_queues(struct rte_eth_dev *dev)
2063 PMD_INIT_FUNC_TRACE();
2065 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2066 if (!dev->data->rx_queues[i])
2068 ice_rx_queue_release(dev->data->rx_queues[i]);
2069 dev->data->rx_queues[i] = NULL;
2070 rte_eth_dma_zone_free(dev, "rx_ring", i);
2072 dev->data->nb_rx_queues = 0;
2074 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2075 if (!dev->data->tx_queues[i])
2077 ice_tx_queue_release(dev->data->tx_queues[i]);
2078 dev->data->tx_queues[i] = NULL;
2079 rte_eth_dma_zone_free(dev, "tx_ring", i);
2081 dev->data->nb_tx_queues = 0;
2084 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2085 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
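/* The flow director (FDIR) programming queues only carry filter
 * programming descriptors, not regular traffic, so the minimum ring size
 * is sufficient in both directions.
 */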
2088 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2090 struct ice_tx_queue *txq;
2091 const struct rte_memzone *tz = NULL;
2093 struct rte_eth_dev *dev;
2096 PMD_DRV_LOG(ERR, "PF is not available");
2100 dev = pf->adapter->eth_dev;
2102 /* Allocate the TX queue data structure. */
2103 txq = rte_zmalloc_socket("ice fdir tx queue",
2104 sizeof(struct ice_tx_queue),
2105 RTE_CACHE_LINE_SIZE,
2108 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2109 "tx queue structure.");
2113 /* Allocate TX hardware ring descriptors. */
2114 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2115 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2117 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2118 ICE_FDIR_QUEUE_ID, ring_size,
2119 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2121 ice_tx_queue_release(txq);
2122 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2126 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2127 txq->queue_id = ICE_FDIR_QUEUE_ID;
2128 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2129 txq->vsi = pf->fdir.fdir_vsi;
2131 txq->tx_ring_dma = tz->iova;
2132 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2134 * No need to allocate a software ring or reset the FDIR
2135 * program queue; just mark the queue as configured.
2140 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2146 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2148 struct ice_rx_queue *rxq;
2149 const struct rte_memzone *rz = NULL;
2151 struct rte_eth_dev *dev;
2154 PMD_DRV_LOG(ERR, "PF is not available");
2158 dev = pf->adapter->eth_dev;
2160 /* Allocate the RX queue data structure. */
2161 rxq = rte_zmalloc_socket("ice fdir rx queue",
2162 sizeof(struct ice_rx_queue),
2163 RTE_CACHE_LINE_SIZE,
2166 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2167 "rx queue structure.");
2171 /* Allocate RX hardware ring descriptors. */
2172 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2173 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2175 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2176 ICE_FDIR_QUEUE_ID, ring_size,
2177 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2179 ice_rx_queue_release(rxq);
2180 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2184 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2185 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2186 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2187 rxq->vsi = pf->fdir.fdir_vsi;
2189 rxq->rx_ring_dma = rz->iova;
2190 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2191 sizeof(union ice_32byte_rx_desc));
2192 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2195 * No need to allocate a software ring or reset the FDIR
2196 * Rx queue; just mark the queue as configured.
2201 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2207 ice_recv_pkts(void *rx_queue,
2208 struct rte_mbuf **rx_pkts,
2211 struct ice_rx_queue *rxq = rx_queue;
2212 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2213 volatile union ice_rx_flex_desc *rxdp;
2214 union ice_rx_flex_desc rxd;
2215 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2216 struct ice_rx_entry *rxe;
2217 struct rte_mbuf *nmb; /* newly allocated mbuf */
2218 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2219 uint16_t rx_id = rxq->rx_tail;
2221 uint16_t nb_hold = 0;
2222 uint16_t rx_packet_len;
2223 uint16_t rx_stat_err0;
2226 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2227 struct rte_eth_dev *dev;
2229 while (nb_rx < nb_pkts) {
2230 rxdp = &rx_ring[rx_id];
2231 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2233 /* Check the DD bit first */
2234 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2238 nmb = rte_mbuf_raw_alloc(rxq->mp);
2239 if (unlikely(!nmb)) {
2240 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2241 dev->data->rx_mbuf_alloc_failed++;
2244 rxd = *rxdp; /* copy descriptor in ring to temp variable */
2247 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2249 if (unlikely(rx_id == rxq->nb_rx_desc))
2254 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2257  * fill the read format of the descriptor with the physical address
2258  * of the newly allocated mbuf: nmb
2260 rxdp->read.hdr_addr = 0;
2261 rxdp->read.pkt_addr = dma_addr;
2263 /* calculate rx_packet_len of the received pkt */
2264 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2265 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2267 /* fill old mbuf with received descriptor: rxd */
2268 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2269 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2272 rxm->pkt_len = rx_packet_len;
2273 rxm->data_len = rx_packet_len;
2274 rxm->port = rxq->port_id;
2275 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2276 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2277 ice_rxd_to_vlan_tci(rxm, &rxd);
2278 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2279 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2280 rxm->ol_flags |= pkt_flags;
2281 /* copy old mbuf to rx_pkts */
2282 rx_pkts[nb_rx++] = rxm;
2284 rxq->rx_tail = rx_id;
2286 * If the number of free RX descriptors is greater than the RX free
2287 * threshold of the queue, advance the receive tail register of queue.
2288 * Update that register with the value of the last processed RX
2289 * descriptor minus 1.
2291 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2292 if (nb_hold > rxq->rx_free_thresh) {
2293 rx_id = (uint16_t)(rx_id == 0 ?
2294 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2295 /* write TAIL register */
2296 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2299 rxq->nb_rx_hold = nb_hold;
2301 /* return the number of packets received in this burst */
2306 ice_parse_tunneling_params(uint64_t ol_flags,
2307 union ice_tx_offload tx_offload,
2308 uint32_t *cd_tunneling)
2310 /* EIPT: External (outer) IP header type */
2311 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2312 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2313 else if (ol_flags & PKT_TX_OUTER_IPV4)
2314 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2315 else if (ol_flags & PKT_TX_OUTER_IPV6)
2316 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2318 /* EIPLEN: External (outer) IP header length, in DWords */
2319 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2320 ICE_TXD_CTX_QW0_EIPLEN_S;
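/* Example: a 20-byte outer IPv4 header with no options encodes as
 * EIPLEN = 20 >> 2 = 5 DWords.
 */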
2322 /* L4TUNT: L4 Tunneling Type */
2323 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2324 case PKT_TX_TUNNEL_IPIP:
2325 /* for non UDP / GRE tunneling, set to 00b */
2327 case PKT_TX_TUNNEL_VXLAN:
2328 case PKT_TX_TUNNEL_GTP:
2329 case PKT_TX_TUNNEL_GENEVE:
2330 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2332 case PKT_TX_TUNNEL_GRE:
2333 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2336 PMD_TX_LOG(ERR, "Tunnel type not supported");
2340 /* L4TUNLEN: L4 Tunneling Length, in Words
2342  * We depend on the application to set rte_mbuf.l2_len correctly.
2343  * For IP in GRE it should be set to the length of the GRE header.
2345  * For MAC in GRE or MAC in UDP it should be set to the length
2346  * of the GRE or UDP headers plus the inner MAC up to and including
2347  * its last Ethertype.
2348  * If MPLS labels exist, they should be included as well.
2350 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2351 ICE_TXD_CTX_QW0_NATLEN_S;
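/* Example (assumed VXLAN case, MAC in UDP): outer UDP (8 B) + VXLAN (8 B) +
 * inner Ethernet (14 B) = 30 B of l2_len, i.e. NATLEN = 30 >> 1 = 15 Words.
 */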
2354  * Request calculation of the tunneling (outer) UDP checksum.
2355  * Set only when L4TUNT = 01b (UDP tunneling) and EIPT is non-zero.
2357 if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2358 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2359 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2363 ice_txd_enable_checksum(uint64_t ol_flags,
2365 uint32_t *td_offset,
2366 union ice_tx_offload tx_offload)
2369 if (ol_flags & PKT_TX_TUNNEL_MASK)
2370 *td_offset |= (tx_offload.outer_l2_len >> 1)
2371 << ICE_TX_DESC_LEN_MACLEN_S;
2373 *td_offset |= (tx_offload.l2_len >> 1)
2374 << ICE_TX_DESC_LEN_MACLEN_S;
2376 /* Enable L3 checksum offloads */
2377 if (ol_flags & PKT_TX_IP_CKSUM) {
2378 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2379 *td_offset |= (tx_offload.l3_len >> 2) <<
2380 ICE_TX_DESC_LEN_IPLEN_S;
2381 } else if (ol_flags & PKT_TX_IPV4) {
2382 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2383 *td_offset |= (tx_offload.l3_len >> 2) <<
2384 ICE_TX_DESC_LEN_IPLEN_S;
2385 } else if (ol_flags & PKT_TX_IPV6) {
2386 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2387 *td_offset |= (tx_offload.l3_len >> 2) <<
2388 ICE_TX_DESC_LEN_IPLEN_S;
2391 if (ol_flags & PKT_TX_TCP_SEG) {
2392 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2393 *td_offset |= (tx_offload.l4_len >> 2) <<
2394 ICE_TX_DESC_LEN_L4_LEN_S;
2398 /* Enable L4 checksum offloads */
2399 switch (ol_flags & PKT_TX_L4_MASK) {
2400 case PKT_TX_TCP_CKSUM:
2401 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2402 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2403 ICE_TX_DESC_LEN_L4_LEN_S;
2405 case PKT_TX_SCTP_CKSUM:
2406 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2407 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2408 ICE_TX_DESC_LEN_L4_LEN_S;
2410 case PKT_TX_UDP_CKSUM:
2411 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2412 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2413 ICE_TX_DESC_LEN_L4_LEN_S;
2421 ice_xmit_cleanup(struct ice_tx_queue *txq)
2423 struct ice_tx_entry *sw_ring = txq->sw_ring;
2424 volatile struct ice_tx_desc *txd = txq->tx_ring;
2425 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2426 uint16_t nb_tx_desc = txq->nb_tx_desc;
2427 uint16_t desc_to_clean_to;
2428 uint16_t nb_tx_to_clean;
2430 /* Determine the last descriptor needing to be cleaned */
2431 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2432 if (desc_to_clean_to >= nb_tx_desc)
2433 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2435 /* Check to make sure the last descriptor to clean is done */
2436 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2437 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2438 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2439 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2440 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2442 txq->port_id, txq->queue_id,
2443 txd[desc_to_clean_to].cmd_type_offset_bsz);
2444 /* Failed to clean any descriptors */
2448 /* Figure out how many descriptors will be cleaned */
2449 if (last_desc_cleaned > desc_to_clean_to)
2450 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2453 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2456 /* The last descriptor to clean is done, so that means all the
2457 * descriptors from the last descriptor that was cleaned
2458 * up to the last descriptor with the RS bit set
2459 * are done. Only reset the threshold descriptor.
2461 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2463 /* Update the txq to reflect the last descriptor that was cleaned */
2464 txq->last_desc_cleaned = desc_to_clean_to;
2465 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2470 /* Construct the Tx data descriptor cmd_type_offset_bsz word */
2471 static inline uint64_t
2472 ice_build_ctob(uint32_t td_cmd,
2477 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2478 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2479 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2480 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2481 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2484 /* Check if the context descriptor is needed for TX offloading */
2485 static inline uint16_t
2486 ice_calc_context_desc(uint64_t flags)
2488 static uint64_t mask = PKT_TX_TCP_SEG |
2490 PKT_TX_OUTER_IP_CKSUM |
2493 return (flags & mask) ? 1 : 0;
2496 /* set ice TSO context descriptor */
2497 static inline uint64_t
2498 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2500 uint64_t ctx_desc = 0;
2501 uint32_t cd_cmd, hdr_len, cd_tso_len;
2503 if (!tx_offload.l4_len) {
2504 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2508 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2509 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2510 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2512 cd_cmd = ICE_TX_CTX_DESC_TSO;
2513 cd_tso_len = mbuf->pkt_len - hdr_len;
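/* The TSO length programmed here covers only the payload: total packet
 * length minus all (outer and inner) header bytes computed above.
 */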
2514 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2515 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2516 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2521 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2522 #define ICE_MAX_DATA_PER_TXD \
2523 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2524 /* Calculate the number of TX descriptors needed for each pkt */
2525 static inline uint16_t
2526 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2528 struct rte_mbuf *txd = tx_pkt;
2531 while (txd != NULL) {
2532 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
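/* Example: a 20000-byte segment needs DIV_ROUND_UP(20000, 16383) = 2 data descriptors. */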
2540 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2542 struct ice_tx_queue *txq;
2543 volatile struct ice_tx_desc *tx_ring;
2544 volatile struct ice_tx_desc *txd;
2545 struct ice_tx_entry *sw_ring;
2546 struct ice_tx_entry *txe, *txn;
2547 struct rte_mbuf *tx_pkt;
2548 struct rte_mbuf *m_seg;
2549 uint32_t cd_tunneling_params;
2554 uint32_t td_cmd = 0;
2555 uint32_t td_offset = 0;
2556 uint32_t td_tag = 0;
2559 uint64_t buf_dma_addr;
2561 union ice_tx_offload tx_offload = {0};
2564 sw_ring = txq->sw_ring;
2565 tx_ring = txq->tx_ring;
2566 tx_id = txq->tx_tail;
2567 txe = &sw_ring[tx_id];
2569 /* Check if the descriptor ring needs to be cleaned. */
2570 if (txq->nb_tx_free < txq->tx_free_thresh)
2571 (void)ice_xmit_cleanup(txq);
2573 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2574 tx_pkt = *tx_pkts++;
2579 ol_flags = tx_pkt->ol_flags;
2580 tx_offload.l2_len = tx_pkt->l2_len;
2581 tx_offload.l3_len = tx_pkt->l3_len;
2582 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2583 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2584 tx_offload.l4_len = tx_pkt->l4_len;
2585 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2586 /* Calculate the number of context descriptors needed. */
2587 nb_ctx = ice_calc_context_desc(ol_flags);
2589 /* The number of descriptors that must be allocated for
2590  * a packet equals the number of segments of that packet
2591  * plus one context descriptor, if needed.
2592  * Recalculate the needed Tx descriptors when TSO is enabled, in case
2593  * the mbuf data size exceeds the maximum data size the HW allows
2596 if (ol_flags & PKT_TX_TCP_SEG)
2597 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2600 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2601 tx_last = (uint16_t)(tx_id + nb_used - 1);
2604 if (tx_last >= txq->nb_tx_desc)
2605 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2607 if (nb_used > txq->nb_tx_free) {
2608 if (ice_xmit_cleanup(txq) != 0) {
2613 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2614 while (nb_used > txq->nb_tx_free) {
2615 if (ice_xmit_cleanup(txq) != 0) {
2624 /* Descriptor based VLAN insertion */
2625 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2626 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2627 td_tag = tx_pkt->vlan_tci;
2630 /* Fill in tunneling parameters if necessary */
2631 cd_tunneling_params = 0;
2632 if (ol_flags & PKT_TX_TUNNEL_MASK)
2633 ice_parse_tunneling_params(ol_flags, tx_offload,
2634 &cd_tunneling_params);
2636 /* Enable checksum offloading */
2637 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2638 ice_txd_enable_checksum(ol_flags, &td_cmd,
2639 &td_offset, tx_offload);
2642 /* Setup TX context descriptor if required */
2643 volatile struct ice_tx_ctx_desc *ctx_txd =
2644 (volatile struct ice_tx_ctx_desc *)
2646 uint16_t cd_l2tag2 = 0;
2647 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2649 txn = &sw_ring[txe->next_id];
2650 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2652 rte_pktmbuf_free_seg(txe->mbuf);
2656 if (ol_flags & PKT_TX_TCP_SEG)
2657 cd_type_cmd_tso_mss |=
2658 ice_set_tso_ctx(tx_pkt, tx_offload);
2660 ctx_txd->tunneling_params =
2661 rte_cpu_to_le_32(cd_tunneling_params);
2663 /* TX context descriptor based double VLAN insert */
2664 if (ol_flags & PKT_TX_QINQ) {
2665 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2666 cd_type_cmd_tso_mss |=
2667 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2668 ICE_TXD_CTX_QW1_CMD_S);
2670 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2672 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2674 txe->last_id = tx_last;
2675 tx_id = txe->next_id;
2681 txd = &tx_ring[tx_id];
2682 txn = &sw_ring[txe->next_id];
2685 rte_pktmbuf_free_seg(txe->mbuf);
2688 /* Setup TX Descriptor */
2689 slen = m_seg->data_len;
2690 buf_dma_addr = rte_mbuf_data_iova(m_seg);
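/* A segment of a TSO packet that exceeds the (16K-1)-byte per-descriptor
 * limit is split across several data descriptors below, advancing the DMA
 * address and remaining length for each one.
 */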
2692 while ((ol_flags & PKT_TX_TCP_SEG) &&
2693 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2694 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2695 txd->cmd_type_offset_bsz =
2696 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2697 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2698 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2699 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2700 ICE_TXD_QW1_TX_BUF_SZ_S) |
2701 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2703 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2704 slen -= ICE_MAX_DATA_PER_TXD;
2706 txe->last_id = tx_last;
2707 tx_id = txe->next_id;
2709 txd = &tx_ring[tx_id];
2710 txn = &sw_ring[txe->next_id];
2713 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2714 txd->cmd_type_offset_bsz =
2715 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2716 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2717 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2718 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2719 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2721 txe->last_id = tx_last;
2722 tx_id = txe->next_id;
2724 m_seg = m_seg->next;
2727 /* set the End of Packet (EOP) bit in the last descriptor */
2728 td_cmd |= ICE_TX_DESC_CMD_EOP;
2729 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2730 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2732 /* set RS bit on the last descriptor of one packet */
2733 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2734 PMD_TX_FREE_LOG(DEBUG,
2735 "Setting RS bit on TXD id="
2736 "%4u (port=%d queue=%d)",
2737 tx_last, txq->port_id, txq->queue_id);
2739 td_cmd |= ICE_TX_DESC_CMD_RS;
2741 /* Update txq RS bit counters */
2742 txq->nb_tx_used = 0;
2744 txd->cmd_type_offset_bsz |=
2745 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2749 /* update Tail register */
2750 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2751 txq->tx_tail = tx_id;
2756 static __rte_always_inline int
2757 ice_tx_free_bufs(struct ice_tx_queue *txq)
2759 struct ice_tx_entry *txep;
2762 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2763 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2764 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2767 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
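/* txep points at the first software ring entry of the tx_rs_thresh-sized
 * batch whose descriptors have completed (DD seen on the threshold entry).
 */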
2769 for (i = 0; i < txq->tx_rs_thresh; i++)
2770 rte_prefetch0((txep + i)->mbuf);
2772 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2773 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2774 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2778 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2779 rte_pktmbuf_free_seg(txep->mbuf);
2784 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2785 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2786 if (txq->tx_next_dd >= txq->nb_tx_desc)
2787 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2789 return txq->tx_rs_thresh;
2793 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2796 struct ice_tx_entry *swr_ring = txq->sw_ring;
2797 uint16_t i, tx_last, tx_id;
2798 uint16_t nb_tx_free_last;
2799 uint16_t nb_tx_to_clean;
2802 /* Start freeing mbufs from the entry following tx_tail */
2803 tx_last = txq->tx_tail;
2804 tx_id = swr_ring[tx_last].next_id;
2806 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2809 nb_tx_to_clean = txq->nb_tx_free;
2810 nb_tx_free_last = txq->nb_tx_free;
2812 free_cnt = txq->nb_tx_desc;
2814 /* Loop through swr_ring to count the number of
2815  * freeable mbufs and packets.
2817 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2818 for (i = 0; i < nb_tx_to_clean &&
2819 pkt_cnt < free_cnt &&
2820 tx_id != tx_last; i++) {
2821 if (swr_ring[tx_id].mbuf != NULL) {
2822 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2823 swr_ring[tx_id].mbuf = NULL;
2826 * last segment in the packet,
2827 * increment packet count
2829 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2832 tx_id = swr_ring[tx_id].next_id;
2835 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2836 txq->nb_tx_free || tx_id == tx_last)
2839 if (pkt_cnt < free_cnt) {
2840 if (ice_xmit_cleanup(txq))
2843 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2844 nb_tx_free_last = txq->nb_tx_free;
2848 return (int)pkt_cnt;
2853 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2854 uint32_t free_cnt __rte_unused)
2861 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2866 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2867 free_cnt = txq->nb_tx_desc;
2869 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
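/* Round the request down to a multiple of tx_rs_thresh, since
 * ice_tx_free_bufs() releases buffers in tx_rs_thresh-sized batches.
 */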
2871 for (i = 0; i < cnt; i += n) {
2872 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2875 n = ice_tx_free_bufs(txq);
2885 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2887 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2888 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2889 struct ice_adapter *ad =
2890 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2893 if (ad->tx_vec_allowed)
2894 return ice_tx_done_cleanup_vec(q, free_cnt);
2896 if (ad->tx_simple_allowed)
2897 return ice_tx_done_cleanup_simple(q, free_cnt);
2899 return ice_tx_done_cleanup_full(q, free_cnt);
2902 /* Populate 4 descriptors with data from 4 mbufs */
2904 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2909 for (i = 0; i < 4; i++, txdp++, pkts++) {
2910 dma_addr = rte_mbuf_data_iova(*pkts);
2911 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2912 txdp->cmd_type_offset_bsz =
2913 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2914 (*pkts)->data_len, 0);
2918 /* Populate 1 descriptor with data from 1 mbuf */
2920 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2924 dma_addr = rte_mbuf_data_iova(*pkts);
2925 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2926 txdp->cmd_type_offset_bsz =
2927 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2928 (*pkts)->data_len, 0);
2932 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2935 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2936 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2937 const int N_PER_LOOP = 4;
2938 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2939 int mainpart, leftover;
2943 * Process most of the packets in chunks of N pkts. Any
2944 * leftover packets will get processed one at a time.
2946 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2947 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
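/* Example: nb_pkts = 10 gives mainpart = 8 (two chunks of four) and leftover = 2. */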
2948 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2949 /* Copy N mbuf pointers to the S/W ring */
2950 for (j = 0; j < N_PER_LOOP; ++j)
2951 (txep + i + j)->mbuf = *(pkts + i + j);
2952 tx4(txdp + i, pkts + i);
2955 if (unlikely(leftover > 0)) {
2956 for (i = 0; i < leftover; ++i) {
2957 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2958 tx1(txdp + mainpart + i, pkts + mainpart + i);
2963 static inline uint16_t
2964 tx_xmit_pkts(struct ice_tx_queue *txq,
2965 struct rte_mbuf **tx_pkts,
2968 volatile struct ice_tx_desc *txr = txq->tx_ring;
2972 * Begin scanning the H/W ring for done descriptors when the number
2973 * of available descriptors drops below tx_free_thresh. For each done
2974 * descriptor, free the associated buffer.
2976 if (txq->nb_tx_free < txq->tx_free_thresh)
2977 ice_tx_free_bufs(txq);
2979 /* Use available descriptor only */
2980 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2981 if (unlikely(!nb_pkts))
2984 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
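/* If the burst would run past the end of the ring, fill descriptors up to
 * the ring end first, set the RS bit on the pending threshold descriptor,
 * then continue filling from index 0.
 */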
2985 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2986 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2987 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2988 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2989 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2991 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2995 /* Fill hardware descriptor ring with mbuf data */
2996 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2997 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2999 /* Determine if the RS bit needs to be set */
3000 if (txq->tx_tail > txq->tx_next_rs) {
3001 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3002 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3005 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3006 if (txq->tx_next_rs >= txq->nb_tx_desc)
3007 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3010 if (txq->tx_tail >= txq->nb_tx_desc)
3013 /* Update the tx tail register */
3014 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3020 ice_xmit_pkts_simple(void *tx_queue,
3021 struct rte_mbuf **tx_pkts,
3026 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3027 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3031 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3034 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3035 &tx_pkts[nb_tx], num);
3036 nb_tx = (uint16_t)(nb_tx + ret);
3037 nb_pkts = (uint16_t)(nb_pkts - ret);
3046 ice_set_rx_function(struct rte_eth_dev *dev)
3048 PMD_INIT_FUNC_TRACE();
3049 struct ice_adapter *ad =
3050 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3052 struct ice_rx_queue *rxq;
3054 bool use_avx512 = false;
3055 bool use_avx2 = false;
3057 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3058 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
3059 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3060 ad->rx_vec_allowed = true;
3061 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3062 rxq = dev->data->rx_queues[i];
3063 if (rxq && ice_rxq_vec_setup(rxq)) {
3064 ad->rx_vec_allowed = false;
3069 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3070 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3071 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3072 #ifdef CC_AVX512_SUPPORT
3076 "AVX512 is not supported in build env");
3079 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3080 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3081 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3085 ad->rx_vec_allowed = false;
3089 if (ad->rx_vec_allowed) {
3090 if (dev->data->scattered_rx) {
3092 #ifdef CC_AVX512_SUPPORT
3094 "Using AVX512 Vector Scattered Rx (port %d).",
3095 dev->data->port_id);
3097 ice_recv_scattered_pkts_vec_avx512;
3101 "Using %sVector Scattered Rx (port %d).",
3102 use_avx2 ? "avx2 " : "",
3103 dev->data->port_id);
3104 dev->rx_pkt_burst = use_avx2 ?
3105 ice_recv_scattered_pkts_vec_avx2 :
3106 ice_recv_scattered_pkts_vec;
3110 #ifdef CC_AVX512_SUPPORT
3112 "Using AVX512 Vector Rx (port %d).",
3113 dev->data->port_id);
3115 ice_recv_pkts_vec_avx512;
3119 "Using %sVector Rx (port %d).",
3120 use_avx2 ? "avx2 " : "",
3121 dev->data->port_id);
3122 dev->rx_pkt_burst = use_avx2 ?
3123 ice_recv_pkts_vec_avx2 :
3132 if (dev->data->scattered_rx) {
3133 /* Set the non-LRO scattered function */
3135 "Using a Scattered function on port %d.",
3136 dev->data->port_id);
3137 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3138 } else if (ad->rx_bulk_alloc_allowed) {
3140 "Rx Burst Bulk Alloc Preconditions are "
3141 "satisfied. Rx Burst Bulk Alloc function "
3142 "will be used on port %d.",
3143 dev->data->port_id);
3144 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3147 "Rx Burst Bulk Alloc Preconditions are not "
3148 "satisfied, Normal Rx will be used on port %d.",
3149 dev->data->port_id);
3150 dev->rx_pkt_burst = ice_recv_pkts;
3154 static const struct {
3155 eth_rx_burst_t pkt_burst;
3157 } ice_rx_burst_infos[] = {
3158 { ice_recv_scattered_pkts, "Scalar Scattered" },
3159 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3160 { ice_recv_pkts, "Scalar" },
3162 #ifdef CC_AVX512_SUPPORT
3163 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3164 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3166 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3167 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3168 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3169 { ice_recv_pkts_vec, "Vector SSE" },
3174 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3175 struct rte_eth_burst_mode *mode)
3177 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3181 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3182 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3183 snprintf(mode->info, sizeof(mode->info), "%s",
3184 ice_rx_burst_infos[i].info);
3194 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3196 struct ice_adapter *ad =
3197 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3199 /* Use a simple Tx queue if possible (only fast free is allowed) */
3200 ad->tx_simple_allowed =
3202 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3203 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3205 if (ad->tx_simple_allowed)
3206 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3210 "Simple Tx can NOT be enabled on Tx queue %u.",
3214 /***************************** TX prep functions *********************
3218 **********************************************************************/
3219 /* The default values of TSO MSS */
3220 #define ICE_MIN_TSO_MSS 64
3221 #define ICE_MAX_TSO_MSS 9728
3222 #define ICE_MAX_TSO_FRAME_SIZE 262144
3224 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3231 for (i = 0; i < nb_pkts; i++) {
3233 ol_flags = m->ol_flags;
3235 if (ol_flags & PKT_TX_TCP_SEG &&
3236 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3237 m->tso_segsz > ICE_MAX_TSO_MSS ||
3238 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3240 * MSS values outside the supported range are considered malicious
3246 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3247 ret = rte_validate_tx_offload(m);
3253 ret = rte_net_intel_cksum_prepare(m);
3263 ice_set_tx_function(struct rte_eth_dev *dev)
3265 struct ice_adapter *ad =
3266 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3268 struct ice_tx_queue *txq;
3270 bool use_avx512 = false;
3271 bool use_avx2 = false;
3273 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3274 if (!ice_tx_vec_dev_check(dev) &&
3275 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3276 ad->tx_vec_allowed = true;
3277 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3278 txq = dev->data->tx_queues[i];
3279 if (txq && ice_txq_vec_setup(txq)) {
3280 ad->tx_vec_allowed = false;
3285 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3286 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3287 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3288 #ifdef CC_AVX512_SUPPORT
3292 "AVX512 is not supported in build env");
3295 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3296 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3297 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3301 ad->tx_vec_allowed = false;
3305 if (ad->tx_vec_allowed) {
3307 #ifdef CC_AVX512_SUPPORT
3308 PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
3309 dev->data->port_id);
3310 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3313 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3314 use_avx2 ? "avx2 " : "",
3315 dev->data->port_id);
3316 dev->tx_pkt_burst = use_avx2 ?
3317 ice_xmit_pkts_vec_avx2 :
3320 dev->tx_pkt_prepare = NULL;
3326 if (ad->tx_simple_allowed) {
3327 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3328 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3329 dev->tx_pkt_prepare = NULL;
3331 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3332 dev->tx_pkt_burst = ice_xmit_pkts;
3333 dev->tx_pkt_prepare = ice_prep_pkts;
3337 static const struct {
3338 eth_tx_burst_t pkt_burst;
3340 } ice_tx_burst_infos[] = {
3341 { ice_xmit_pkts_simple, "Scalar Simple" },
3342 { ice_xmit_pkts, "Scalar" },
3344 #ifdef CC_AVX512_SUPPORT
3345 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3347 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3348 { ice_xmit_pkts_vec, "Vector SSE" },
3353 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3354 struct rte_eth_burst_mode *mode)
3356 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3360 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3361 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3362 snprintf(mode->info, sizeof(mode->info), "%s",
3363 ice_tx_burst_infos[i].info);
3372 /* The hardware datasheet describes in detail what each ptype value means.
3374  * @note: update ice_dev_supported_ptypes_get() if anything changes here.
3376 static inline uint32_t
3377 ice_get_default_pkt_type(uint16_t ptype)
3379 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3380 __rte_cache_aligned = {
3383 [1] = RTE_PTYPE_L2_ETHER,
3384 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3385 /* [3] - [5] reserved */
3386 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3387 /* [7] - [10] reserved */
3388 [11] = RTE_PTYPE_L2_ETHER_ARP,
3389 /* [12] - [21] reserved */
3391 /* Non tunneled IPv4 */
3392 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3394 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3395 RTE_PTYPE_L4_NONFRAG,
3396 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3399 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3401 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3403 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3407 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3408 RTE_PTYPE_TUNNEL_IP |
3409 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3410 RTE_PTYPE_INNER_L4_FRAG,
3411 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3412 RTE_PTYPE_TUNNEL_IP |
3413 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3414 RTE_PTYPE_INNER_L4_NONFRAG,
3415 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3416 RTE_PTYPE_TUNNEL_IP |
3417 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3418 RTE_PTYPE_INNER_L4_UDP,
3420 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3421 RTE_PTYPE_TUNNEL_IP |
3422 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3423 RTE_PTYPE_INNER_L4_TCP,
3424 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3425 RTE_PTYPE_TUNNEL_IP |
3426 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3427 RTE_PTYPE_INNER_L4_SCTP,
3428 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3429 RTE_PTYPE_TUNNEL_IP |
3430 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3431 RTE_PTYPE_INNER_L4_ICMP,
3434 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3435 RTE_PTYPE_TUNNEL_IP |
3436 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3437 RTE_PTYPE_INNER_L4_FRAG,
3438 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3439 RTE_PTYPE_TUNNEL_IP |
3440 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3441 RTE_PTYPE_INNER_L4_NONFRAG,
3442 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3443 RTE_PTYPE_TUNNEL_IP |
3444 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3445 RTE_PTYPE_INNER_L4_UDP,
3447 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3448 RTE_PTYPE_TUNNEL_IP |
3449 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3450 RTE_PTYPE_INNER_L4_TCP,
3451 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3452 RTE_PTYPE_TUNNEL_IP |
3453 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3454 RTE_PTYPE_INNER_L4_SCTP,
3455 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3456 RTE_PTYPE_TUNNEL_IP |
3457 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3458 RTE_PTYPE_INNER_L4_ICMP,
3460 /* IPv4 --> GRE/Teredo/VXLAN */
3461 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3462 RTE_PTYPE_TUNNEL_GRENAT,
3464 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3465 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3466 RTE_PTYPE_TUNNEL_GRENAT |
3467 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3468 RTE_PTYPE_INNER_L4_FRAG,
3469 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3470 RTE_PTYPE_TUNNEL_GRENAT |
3471 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3472 RTE_PTYPE_INNER_L4_NONFRAG,
3473 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3474 RTE_PTYPE_TUNNEL_GRENAT |
3475 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3476 RTE_PTYPE_INNER_L4_UDP,
3478 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3479 RTE_PTYPE_TUNNEL_GRENAT |
3480 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3481 RTE_PTYPE_INNER_L4_TCP,
3482 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3483 RTE_PTYPE_TUNNEL_GRENAT |
3484 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3485 RTE_PTYPE_INNER_L4_SCTP,
3486 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3487 RTE_PTYPE_TUNNEL_GRENAT |
3488 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3489 RTE_PTYPE_INNER_L4_ICMP,
3491 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3492 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3493 RTE_PTYPE_TUNNEL_GRENAT |
3494 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3495 RTE_PTYPE_INNER_L4_FRAG,
3496 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3497 RTE_PTYPE_TUNNEL_GRENAT |
3498 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3499 RTE_PTYPE_INNER_L4_NONFRAG,
3500 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3501 RTE_PTYPE_TUNNEL_GRENAT |
3502 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3503 RTE_PTYPE_INNER_L4_UDP,
3505 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3506 RTE_PTYPE_TUNNEL_GRENAT |
3507 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3508 RTE_PTYPE_INNER_L4_TCP,
3509 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3510 RTE_PTYPE_TUNNEL_GRENAT |
3511 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3512 RTE_PTYPE_INNER_L4_SCTP,
3513 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3514 RTE_PTYPE_TUNNEL_GRENAT |
3515 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3516 RTE_PTYPE_INNER_L4_ICMP,
3518 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3519 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3520 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3522 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3523 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3524 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3525 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3526 RTE_PTYPE_INNER_L4_FRAG,
3527 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3528 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3529 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3530 RTE_PTYPE_INNER_L4_NONFRAG,
3531 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3532 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3533 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3534 RTE_PTYPE_INNER_L4_UDP,
3536 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3537 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3538 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3539 RTE_PTYPE_INNER_L4_TCP,
3540 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3541 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3542 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3543 RTE_PTYPE_INNER_L4_SCTP,
3544 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3545 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3546 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3547 RTE_PTYPE_INNER_L4_ICMP,
3549 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3550 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3551 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3552 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3553 RTE_PTYPE_INNER_L4_FRAG,
3554 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3555 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3556 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3557 RTE_PTYPE_INNER_L4_NONFRAG,
3558 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3559 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3560 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3561 RTE_PTYPE_INNER_L4_UDP,
3563 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3564 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3565 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3566 RTE_PTYPE_INNER_L4_TCP,
3567 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3568 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3569 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3570 RTE_PTYPE_INNER_L4_SCTP,
3571 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3572 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3573 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3574 RTE_PTYPE_INNER_L4_ICMP,
3575 /* [73] - [87] reserved */
3577 /* Non tunneled IPv6 */
3578 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3580 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3581 RTE_PTYPE_L4_NONFRAG,
3582 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3585 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3587 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3589 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3593 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3594 RTE_PTYPE_TUNNEL_IP |
3595 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3596 RTE_PTYPE_INNER_L4_FRAG,
3597 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3598 RTE_PTYPE_TUNNEL_IP |
3599 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3600 RTE_PTYPE_INNER_L4_NONFRAG,
3601 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3602 RTE_PTYPE_TUNNEL_IP |
3603 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3604 RTE_PTYPE_INNER_L4_UDP,
3606 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3607 RTE_PTYPE_TUNNEL_IP |
3608 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3609 RTE_PTYPE_INNER_L4_TCP,
3610 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3611 RTE_PTYPE_TUNNEL_IP |
3612 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3613 RTE_PTYPE_INNER_L4_SCTP,
3614 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3615 RTE_PTYPE_TUNNEL_IP |
3616 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3617 RTE_PTYPE_INNER_L4_ICMP,
3620 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3621 RTE_PTYPE_TUNNEL_IP |
3622 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3623 RTE_PTYPE_INNER_L4_FRAG,
3624 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3625 RTE_PTYPE_TUNNEL_IP |
3626 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3627 RTE_PTYPE_INNER_L4_NONFRAG,
3628 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3629 RTE_PTYPE_TUNNEL_IP |
3630 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3631 RTE_PTYPE_INNER_L4_UDP,
3632 /* [105] reserved */
3633 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3634 RTE_PTYPE_TUNNEL_IP |
3635 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3636 RTE_PTYPE_INNER_L4_TCP,
3637 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3638 RTE_PTYPE_TUNNEL_IP |
3639 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3640 RTE_PTYPE_INNER_L4_SCTP,
3641 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3642 RTE_PTYPE_TUNNEL_IP |
3643 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3644 RTE_PTYPE_INNER_L4_ICMP,
3646 /* IPv6 --> GRE/Teredo/VXLAN */
3647 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3648 RTE_PTYPE_TUNNEL_GRENAT,
3650 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3651 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3652 RTE_PTYPE_TUNNEL_GRENAT |
3653 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3654 RTE_PTYPE_INNER_L4_FRAG,
3655 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3656 RTE_PTYPE_TUNNEL_GRENAT |
3657 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3658 RTE_PTYPE_INNER_L4_NONFRAG,
3659 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3660 RTE_PTYPE_TUNNEL_GRENAT |
3661 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3662 RTE_PTYPE_INNER_L4_UDP,
3663 /* [113] reserved */
3664 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3665 RTE_PTYPE_TUNNEL_GRENAT |
3666 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3667 RTE_PTYPE_INNER_L4_TCP,
3668 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3669 RTE_PTYPE_TUNNEL_GRENAT |
3670 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3671 RTE_PTYPE_INNER_L4_SCTP,
3672 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3673 RTE_PTYPE_TUNNEL_GRENAT |
3674 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3675 RTE_PTYPE_INNER_L4_ICMP,
3677 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3678 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3679 RTE_PTYPE_TUNNEL_GRENAT |
3680 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3681 RTE_PTYPE_INNER_L4_FRAG,
3682 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3683 RTE_PTYPE_TUNNEL_GRENAT |
3684 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3685 RTE_PTYPE_INNER_L4_NONFRAG,
3686 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3687 RTE_PTYPE_TUNNEL_GRENAT |
3688 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3689 RTE_PTYPE_INNER_L4_UDP,
3690 /* [120] reserved */
3691 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3692 RTE_PTYPE_TUNNEL_GRENAT |
3693 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3694 RTE_PTYPE_INNER_L4_TCP,
3695 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3696 RTE_PTYPE_TUNNEL_GRENAT |
3697 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_INNER_L4_SCTP,
3699 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3700 RTE_PTYPE_TUNNEL_GRENAT |
3701 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3702 RTE_PTYPE_INNER_L4_ICMP,
3704 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3705 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3706 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3708 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3709 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3710 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3711 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3712 RTE_PTYPE_INNER_L4_FRAG,
3713 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3714 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3715 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3716 RTE_PTYPE_INNER_L4_NONFRAG,
3717 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3718 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3719 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3720 RTE_PTYPE_INNER_L4_UDP,
3721 /* [128] reserved */
3722 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3723 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3724 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3725 RTE_PTYPE_INNER_L4_TCP,
3726 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3727 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3728 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3729 RTE_PTYPE_INNER_L4_SCTP,
3730 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3731 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3732 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3733 RTE_PTYPE_INNER_L4_ICMP,
3735 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3736 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3737 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3738 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3739 RTE_PTYPE_INNER_L4_FRAG,
3740 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3741 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3742 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3743 RTE_PTYPE_INNER_L4_NONFRAG,
3744 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3745 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3746 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3747 RTE_PTYPE_INNER_L4_UDP,
3748 /* [135] reserved */
3749 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3750 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3751 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3752 RTE_PTYPE_INNER_L4_TCP,
3753 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3754 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3755 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3756 RTE_PTYPE_INNER_L4_SCTP,
3757 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3758 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3759 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3760 RTE_PTYPE_INNER_L4_ICMP,
3761 /* [139] - [299] reserved */
3764 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3765 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3767 /* PPPoE --> IPv4 */
3768 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3769 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3771 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3772 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3773 RTE_PTYPE_L4_NONFRAG,
3774 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3775 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3777 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3778 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3780 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3781 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3783 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3784 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3787 /* PPPoE --> IPv6 */
3788 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3789 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3791 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3792 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3793 RTE_PTYPE_L4_NONFRAG,
3794 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3795 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3797 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3798 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3800 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3801 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3803 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3804 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3806 /* [314] - [324] reserved */
3808 /* IPv4/IPv6 --> GTPC/GTPU */
3809 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3810 RTE_PTYPE_TUNNEL_GTPC,
3811 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3812 RTE_PTYPE_TUNNEL_GTPC,
3813 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3814 RTE_PTYPE_TUNNEL_GTPC,
3815 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3816 RTE_PTYPE_TUNNEL_GTPC,
3817 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3818 RTE_PTYPE_TUNNEL_GTPU,
3819 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3820 RTE_PTYPE_TUNNEL_GTPU,
3822 /* IPv4 --> GTPU --> IPv4 */
3823 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3824 RTE_PTYPE_TUNNEL_GTPU |
3825 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3826 RTE_PTYPE_INNER_L4_FRAG,
3827 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3828 RTE_PTYPE_TUNNEL_GTPU |
3829 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3830 RTE_PTYPE_INNER_L4_NONFRAG,
3831 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3832 RTE_PTYPE_TUNNEL_GTPU |
3833 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3834 RTE_PTYPE_INNER_L4_UDP,
3835 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3836 RTE_PTYPE_TUNNEL_GTPU |
3837 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3838 RTE_PTYPE_INNER_L4_TCP,
3839 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3840 RTE_PTYPE_TUNNEL_GTPU |
3841 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3842 RTE_PTYPE_INNER_L4_ICMP,
3844 /* IPv6 --> GTPU --> IPv4 */
3845 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3846 RTE_PTYPE_TUNNEL_GTPU |
3847 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3848 RTE_PTYPE_INNER_L4_FRAG,
3849 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3850 RTE_PTYPE_TUNNEL_GTPU |
3851 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3852 RTE_PTYPE_INNER_L4_NONFRAG,
3853 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3854 RTE_PTYPE_TUNNEL_GTPU |
3855 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3856 RTE_PTYPE_INNER_L4_UDP,
3857 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3858 RTE_PTYPE_TUNNEL_GTPU |
3859 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3860 RTE_PTYPE_INNER_L4_TCP,
3861 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3862 RTE_PTYPE_TUNNEL_GTPU |
3863 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3864 RTE_PTYPE_INNER_L4_ICMP,
3866 /* IPv4 --> GTPU --> IPv6 */
3867 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3868 RTE_PTYPE_TUNNEL_GTPU |
3869 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3870 RTE_PTYPE_INNER_L4_FRAG,
3871 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3872 RTE_PTYPE_TUNNEL_GTPU |
3873 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3874 RTE_PTYPE_INNER_L4_NONFRAG,
3875 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3876 RTE_PTYPE_TUNNEL_GTPU |
3877 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3878 RTE_PTYPE_INNER_L4_UDP,
3879 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3880 RTE_PTYPE_TUNNEL_GTPU |
3881 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3882 RTE_PTYPE_INNER_L4_TCP,
3883 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3884 RTE_PTYPE_TUNNEL_GTPU |
3885 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3886 RTE_PTYPE_INNER_L4_ICMP,
3888 /* IPv6 --> GTPU --> IPv6 */
3889 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3890 RTE_PTYPE_TUNNEL_GTPU |
3891 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3892 RTE_PTYPE_INNER_L4_FRAG,
3893 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3894 RTE_PTYPE_TUNNEL_GTPU |
3895 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3896 RTE_PTYPE_INNER_L4_NONFRAG,
3897 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3898 RTE_PTYPE_TUNNEL_GTPU |
3899 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3900 RTE_PTYPE_INNER_L4_UDP,
3901 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3902 RTE_PTYPE_TUNNEL_GTPU |
3903 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3904 RTE_PTYPE_INNER_L4_TCP,
3905 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3906 RTE_PTYPE_TUNNEL_GTPU |
3907 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3908 RTE_PTYPE_INNER_L4_ICMP,
3910 /* IPv4 --> UDP ECPRI */
3911 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3913 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3915 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3917 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3919 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3921 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3923 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3925 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3927 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3929 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3932 /* IPV6 --> UDP ECPRI */
3933 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3935 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3937 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3939 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3941 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3943 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3945 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3947 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3949 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3951 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3953 /* All others reserved */
3956 return type_table[ptype];
3960 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3962 struct ice_adapter *ad =
3963 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3966 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3967 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
3970 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
3971 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
3972 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3973 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3974 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3976 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
3977 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
3978 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3979 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
3980 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
3981 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
3984  * Check the programming status descriptor in the Rx queue. This is
3985  * done after a Flow Director filter has been programmed on the Tx queue.
3989 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3991 volatile union ice_32byte_rx_desc *rxdp;
3998 rxdp = (volatile union ice_32byte_rx_desc *)
3999 (&rxq->rx_ring[rxq->rx_tail]);
4000 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4001 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4002 >> ICE_RXD_QW1_STATUS_S;
4004 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4006 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4007 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4008 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4009 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4011 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4012 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4013 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4014 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4018 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4019 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4021 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4025 rxdp->wb.qword1.status_error_len = 0;
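/* Clear the write-back word so stale status is not re-read on the next
 * check, then advance the ring index and update the hardware tail register.
 */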
4027 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4029 if (rxq->rx_tail == 0)
4030 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4032 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4038 #define ICE_FDIR_MAX_WAIT_US 10000
4041 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4043 struct ice_tx_queue *txq = pf->fdir.txq;
4044 struct ice_rx_queue *rxq = pf->fdir.rxq;
4045 volatile struct ice_fltr_desc *fdirdp;
4046 volatile struct ice_tx_desc *txdp;
4050 fdirdp = (volatile struct ice_fltr_desc *)
4051 (&txq->tx_ring[txq->tx_tail]);
4052 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4053 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4055 txdp = &txq->tx_ring[txq->tx_tail + 1];
4056 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4057 td_cmd = ICE_TX_DESC_CMD_EOP |
4058 ICE_TX_DESC_CMD_RS |
4059 ICE_TX_DESC_CMD_DUMMY;
4061 txdp->cmd_type_offset_bsz =
4062 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
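/* The second descriptor points at the pre-built programming packet
 * (pf->fdir.dma_addr); EOP, RS and DUMMY are set so the hardware reports
 * completion of this programming operation.
 */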
4065 if (txq->tx_tail >= txq->nb_tx_desc)
4067 /* Update the tx tail register */
4068 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4069 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4070 if ((txdp->cmd_type_offset_bsz &
4071 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4072 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4076 if (i >= ICE_FDIR_MAX_WAIT_US) {
4078 "Failed to program FDIR filter: time out to get DD on tx queue.");
4082 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4085 ret = ice_check_fdir_programming_status(rxq);
4093 "Failed to program FDIR filter: programming status reported.");