1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <ethdev_driver.h>
9 #include "rte_pmd_ice.h"
11 #include "ice_rxtx_vec_common.h"
13 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
17 PKT_TX_OUTER_IP_CKSUM)
19 /* Offset of mbuf dynamic field for protocol extraction data */
20 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
22 /* Masks of mbuf dynamic flags for protocol extraction types */
23 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
28 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
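/*
 * Applications that enable protocol extraction (typically via the
 * "proto_xtr" devargs) can test these flag masks in mb->ol_flags and, when
 * one is set, read the 32-bit extraction result through the
 * RTE_NET_ICE_DYNF_PROTO_XTR_METADATA() accessor from rte_pmd_ice.h.
 * Illustrative application-side sketch (not part of the driver):
 *
 *	if (mb->ol_flags & rte_net_ice_dynflag_proto_xtr_ipv4_mask)
 *		ipv4_meta = *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb);
 */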
31 ice_monitor_callback(const uint64_t value,
32 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
34 const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
36 * we expect the DD bit to be set to 1 if this descriptor was already
39 return (value & m) == m ? -1 : 0;
43 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
45 volatile union ice_rx_flex_desc *rxdp;
46 struct ice_rx_queue *rxq = rx_queue;
50 rxdp = &rxq->rx_ring[desc];
51 /* watch for changes in status bit */
52 pmc->addr = &rxdp->wb.status_error0;
54 /* comparison callback */
55 pmc->fn = ice_monitor_callback;
57 /* register is 16-bit */
58 pmc->size = sizeof(uint16_t);
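/*
 * Note: pmc describes the wake-up condition for rte_power_monitor(): the
 * core monitors status_error0, and the comparison callback above returns
 * -1 (abort entering the power-optimized state) when the DD bit is already
 * set, or 0 to allow sleeping, per the rte_power_monitor() callback
 * contract.
 */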
65 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
67 static uint8_t rxdid_map[] = {
68 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
69 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
70 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
71 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
72 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
73 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
74 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
77 return xtr_type < RTE_DIM(rxdid_map) ?
78 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
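/*
 * Extraction types beyond the table fall back to ICE_RXDID_COMMS_OVS, the
 * default flexible descriptor profile used when no protocol extraction is
 * requested.
 */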
82 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
84 volatile union ice_rx_flex_desc *rxdp)
86 volatile struct ice_32b_rx_flex_desc_comms *desc =
87 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
88 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
90 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
91 mb->ol_flags |= PKT_RX_RSS_HASH;
92 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
95 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
96 if (desc->flow_id != 0xFFFFFFFF) {
97 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
98 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
104 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
106 volatile union ice_rx_flex_desc *rxdp)
108 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
109 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
110 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
114 if (desc->flow_id != 0xFFFFFFFF) {
115 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
116 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
119 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
120 stat_err = rte_le_to_cpu_16(desc->status_error0);
121 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
122 mb->ol_flags |= PKT_RX_RSS_HASH;
123 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
129 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
131 volatile union ice_rx_flex_desc *rxdp)
133 volatile struct ice_32b_rx_flex_desc_comms *desc =
134 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
137 stat_err = rte_le_to_cpu_16(desc->status_error0);
138 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
139 mb->ol_flags |= PKT_RX_RSS_HASH;
140 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
143 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
144 if (desc->flow_id != 0xFFFFFFFF) {
145 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
146 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
149 if (rxq->xtr_ol_flag) {
150 uint32_t metadata = 0;
152 stat_err = rte_le_to_cpu_16(desc->status_error1);
154 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
155 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
157 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
159 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
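/*
 * For the v1 AUX profiles the 32-bit extraction result arrives split
 * across two 16-bit flex words: aux0 supplies the low half and aux1 the
 * high half, each gated by its own XTRMD*_VALID status bit.
 */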
162 mb->ol_flags |= rxq->xtr_ol_flag;
164 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
171 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
173 volatile union ice_rx_flex_desc *rxdp)
175 volatile struct ice_32b_rx_flex_desc_comms *desc =
176 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
179 stat_err = rte_le_to_cpu_16(desc->status_error0);
180 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
181 mb->ol_flags |= PKT_RX_RSS_HASH;
182 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
185 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
186 if (desc->flow_id != 0xFFFFFFFF) {
187 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
188 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
191 if (rxq->xtr_ol_flag) {
192 uint32_t metadata = 0;
194 if (desc->flex_ts.flex.aux0 != 0xFFFF)
195 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
196 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
197 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
200 mb->ol_flags |= rxq->xtr_ol_flag;
202 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
209 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
212 case ICE_RXDID_COMMS_AUX_VLAN:
213 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
214 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
217 case ICE_RXDID_COMMS_AUX_IPV4:
218 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
219 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
222 case ICE_RXDID_COMMS_AUX_IPV6:
223 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
224 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
227 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
228 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
229 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
232 case ICE_RXDID_COMMS_AUX_TCP:
233 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
234 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
237 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
238 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
239 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
242 case ICE_RXDID_COMMS_GENERIC:
243 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
246 case ICE_RXDID_COMMS_OVS:
247 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
251 /* update this according to the RXDID for PROTO_XTR_NONE */
252 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
256 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
257 rxq->xtr_ol_flag = 0;
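/*
 * Note: xtr_ol_flag is cleared above when the protocol extraction metadata
 * dynfield was never registered (e.g. because the proto_xtr devargs did
 * not request any extraction), so the Rx hot path skips the metadata store
 * entirely.
 */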
260 static enum ice_status
261 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
263 struct ice_vsi *vsi = rxq->vsi;
264 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
265 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
266 struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
267 struct ice_rlan_ctx rx_ctx;
270 struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
271 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
274 /* Set buffer size, as header split is disabled. */
275 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
276 RTE_PKTMBUF_HEADROOM);
278 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
279 rxq->max_pkt_len = RTE_MIN((uint32_t)
280 ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
281 dev_data->dev_conf.rxmode.max_rx_pkt_len);
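/*
 * Sizing sketch (illustrative figures only): with a pool using the common
 * RTE_MBUF_DEFAULT_BUF_SIZE, buf_size comes to 2048 bytes once
 * RTE_PKTMBUF_HEADROOM is removed, and max_pkt_len is then the smaller of
 * ICE_SUPPORT_CHAIN_NUM chained buffers of that size and the configured
 * max_rx_pkt_len.
 */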
283 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
284 if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
285 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
286 PMD_DRV_LOG(ERR, "maximum packet length must "
287 "be larger than %u and smaller than %u,"
288 "as jumbo frame is enabled",
289 (uint32_t)ICE_ETH_MAX_LEN,
290 (uint32_t)ICE_FRAME_SIZE_MAX);
294 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
295 rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
296 PMD_DRV_LOG(ERR, "maximum packet length must be "
297 "larger than %u and smaller than %u, "
298 "as jumbo frame is disabled",
299 (uint32_t)RTE_ETHER_MIN_LEN,
300 (uint32_t)ICE_ETH_MAX_LEN);
305 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
306 /* Register mbuf field and flag for Rx timestamp */
307 err = rte_mbuf_dyn_rx_timestamp_register(
308 &ice_timestamp_dynfield_offset,
309 &ice_timestamp_dynflag);
312 "Cannot register mbuf field/flag for timestamp");
317 memset(&rx_ctx, 0, sizeof(rx_ctx));
319 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
320 rx_ctx.qlen = rxq->nb_rx_desc;
321 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
322 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
323 rx_ctx.dtype = 0; /* No Header Split mode */
324 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
325 rx_ctx.dsize = 1; /* 32B descriptors */
327 rx_ctx.rxmax = rxq->max_pkt_len;
328 /* TPH: Transaction Layer Packet (TLP) processing hints */
329 rx_ctx.tphrdesc_ena = 1;
330 rx_ctx.tphwdesc_ena = 1;
331 rx_ctx.tphdata_ena = 1;
332 rx_ctx.tphhead_ena = 1;
333 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
334 * When the number of free descriptors goes below the lrxqthresh,
335 * an immediate interrupt is triggered.
337 rx_ctx.lrxqthresh = 2;
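/*
 * With the field expressed in units of 64 descriptors, lrxqthresh = 2
 * corresponds to raising the interrupt once fewer than 128 free
 * descriptors remain.
 */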
338 /* default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
341 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
343 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
345 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
346 rxq->port_id, rxq->queue_id, rxdid);
348 if (!(pf->supported_rxdid & BIT(rxdid))) {
349 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
354 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
356 /* Enable Flexible Descriptors in the queue context which
357 * allows this driver to select a specific receive descriptor format
359 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
360 QRXFLXP_CNTXT_RXDID_IDX_M;
362 /* increasing context priority to pick up profile ID;
363 * default is 0x01; setting to 0x03 to ensure profile
364 * is programmed if the previous context is of the same priority
366 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
367 QRXFLXP_CNTXT_RXDID_PRIO_M;
369 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
370 regval |= QRXFLXP_CNTXT_TS_M;
372 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
374 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
376 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
380 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
382 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
387 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
388 RTE_PKTMBUF_HEADROOM);
390 /* Check if scattered RX needs to be used. */
391 if (rxq->max_pkt_len > buf_size)
392 dev_data->scattered_rx = 1;
394 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
396 /* Init the Rx tail register*/
397 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
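/*
 * Writing nb_rx_desc - 1 to the tail hands all but one descriptor to the
 * hardware; the descriptor held back keeps the tail from ever equalling
 * the head (see the RDT update comment in the Rx burst path below).
 */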
402 /* Allocate mbufs for all descriptors in rx queue */
404 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
406 struct ice_rx_entry *rxe = rxq->sw_ring;
410 for (i = 0; i < rxq->nb_rx_desc; i++) {
411 volatile union ice_rx_flex_desc *rxd;
412 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
414 if (unlikely(!mbuf)) {
415 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
419 rte_mbuf_refcnt_set(mbuf, 1);
421 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
423 mbuf->port = rxq->port_id;
426 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
428 rxd = &rxq->rx_ring[i];
429 rxd->read.pkt_addr = dma_addr;
430 rxd->read.hdr_addr = 0;
431 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
441 /* Free all mbufs for descriptors in rx queue */
443 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
447 if (!rxq || !rxq->sw_ring) {
448 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
452 for (i = 0; i < rxq->nb_rx_desc; i++) {
453 if (rxq->sw_ring[i].mbuf) {
454 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
455 rxq->sw_ring[i].mbuf = NULL;
458 if (rxq->rx_nb_avail == 0)
460 for (i = 0; i < rxq->rx_nb_avail; i++)
461 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
463 rxq->rx_nb_avail = 0;
466 /* Turn the Rx queue on or off
467 * @q_idx: queue index in PF scope
468 * @on: true to turn the queue on, false to turn it off
471 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
476 /* QRX_CTRL = QRX_ENA */
477 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
480 if (reg & QRX_CTRL_QENA_STAT_M)
481 return 0; /* Already on, skip */
482 reg |= QRX_CTRL_QENA_REQ_M;
484 if (!(reg & QRX_CTRL_QENA_STAT_M))
485 return 0; /* Already off, skip */
486 reg &= ~QRX_CTRL_QENA_REQ_M;
489 /* Write the register */
490 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
491 /* Check the result. It is said that QENA_STAT
492 * follows QENA_REQ by no more than 10 us.
493 * TODO: need to change the wait counter later
495 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
496 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
497 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
499 if ((reg & QRX_CTRL_QENA_REQ_M) &&
500 (reg & QRX_CTRL_QENA_STAT_M))
503 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
504 !(reg & QRX_CTRL_QENA_STAT_M))
509 /* Check whether the wait timed out */
510 if (j >= ICE_CHK_Q_ENA_COUNT) {
511 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
512 (on ? "enable" : "disable"), q_idx);
520 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
524 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
525 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
526 "rxq->rx_free_thresh=%d, "
527 "ICE_RX_MAX_BURST=%d",
528 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
530 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
531 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
532 "rxq->rx_free_thresh=%d, "
533 "rxq->nb_rx_desc=%d",
534 rxq->rx_free_thresh, rxq->nb_rx_desc);
536 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
537 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
538 "rxq->nb_rx_desc=%d, "
539 "rxq->rx_free_thresh=%d",
540 rxq->nb_rx_desc, rxq->rx_free_thresh);
547 /* reset fields in ice_rx_queue back to default */
549 ice_reset_rx_queue(struct ice_rx_queue *rxq)
555 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
559 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
561 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
562 ((volatile char *)rxq->rx_ring)[i] = 0;
564 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
565 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
566 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
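/*
 * The ICE_RX_MAX_BURST extra software-ring entries past nb_rx_desc all
 * point at the local fake_mbuf, so bulk/look-ahead Rx paths that peek
 * beyond the ring end never dereference a NULL mbuf pointer.
 */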
568 rxq->rx_nb_avail = 0;
569 rxq->rx_next_avail = 0;
570 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
574 rxq->pkt_first_seg = NULL;
575 rxq->pkt_last_seg = NULL;
577 rxq->rxrearm_start = 0;
582 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
584 struct ice_rx_queue *rxq;
586 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
588 PMD_INIT_FUNC_TRACE();
590 if (rx_queue_id >= dev->data->nb_rx_queues) {
591 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
592 rx_queue_id, dev->data->nb_rx_queues);
596 rxq = dev->data->rx_queues[rx_queue_id];
597 if (!rxq || !rxq->q_set) {
598 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
603 err = ice_program_hw_rx_queue(rxq);
605 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
610 err = ice_alloc_rx_queue_mbufs(rxq);
612 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
616 /* Init the RX tail register. */
617 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
619 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
621 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
624 rxq->rx_rel_mbufs(rxq);
625 ice_reset_rx_queue(rxq);
629 dev->data->rx_queue_state[rx_queue_id] =
630 RTE_ETH_QUEUE_STATE_STARTED;
636 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
638 struct ice_rx_queue *rxq;
640 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642 if (rx_queue_id < dev->data->nb_rx_queues) {
643 rxq = dev->data->rx_queues[rx_queue_id];
645 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
647 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
651 rxq->rx_rel_mbufs(rxq);
652 ice_reset_rx_queue(rxq);
653 dev->data->rx_queue_state[rx_queue_id] =
654 RTE_ETH_QUEUE_STATE_STOPPED;
661 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
663 struct ice_tx_queue *txq;
667 struct ice_aqc_add_tx_qgrp *txq_elem;
668 struct ice_tlan_ctx tx_ctx;
671 PMD_INIT_FUNC_TRACE();
673 if (tx_queue_id >= dev->data->nb_tx_queues) {
674 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
675 tx_queue_id, dev->data->nb_tx_queues);
679 txq = dev->data->tx_queues[tx_queue_id];
680 if (!txq || !txq->q_set) {
681 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
686 buf_len = ice_struct_size(txq_elem, txqs, 1);
687 txq_elem = ice_malloc(hw, buf_len);
692 hw = ICE_VSI_TO_HW(vsi);
694 memset(&tx_ctx, 0, sizeof(tx_ctx));
695 txq_elem->num_txqs = 1;
696 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
698 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
699 tx_ctx.qlen = txq->nb_tx_desc;
700 tx_ctx.pf_num = hw->pf_id;
701 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
702 tx_ctx.src_vsi = vsi->vsi_id;
703 tx_ctx.port_num = hw->port_info->lport;
704 tx_ctx.tso_ena = 1; /* tso enable */
705 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
706 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
708 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
711 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
713 /* Init the Tx tail register*/
714 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
716 /* FIXME: we assume TC is always 0 here */
717 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
718 txq_elem, buf_len, NULL);
720 PMD_DRV_LOG(ERR, "Failed to add lan txq");
724 /* store the schedule node id */
725 txq->q_teid = txq_elem->txqs[0].q_teid;
727 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
733 static enum ice_status
734 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
736 struct ice_vsi *vsi = rxq->vsi;
737 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
738 uint32_t rxdid = ICE_RXDID_LEGACY_1;
739 struct ice_rlan_ctx rx_ctx;
744 rxq->rx_buf_len = 1024;
746 memset(&rx_ctx, 0, sizeof(rx_ctx));
748 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
749 rx_ctx.qlen = rxq->nb_rx_desc;
750 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
751 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
752 rx_ctx.dtype = 0; /* No Header Split mode */
753 rx_ctx.dsize = 1; /* 32B descriptors */
754 rx_ctx.rxmax = ICE_ETH_MAX_LEN;
755 /* TPH: Transaction Layer Packet (TLP) processing hints */
756 rx_ctx.tphrdesc_ena = 1;
757 rx_ctx.tphwdesc_ena = 1;
758 rx_ctx.tphdata_ena = 1;
759 rx_ctx.tphhead_ena = 1;
760 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
761 * When the number of free descriptors goes below the lrxqthresh,
762 * an immediate interrupt is triggered.
764 rx_ctx.lrxqthresh = 2;
765 /* default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
768 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
770 /* Enable Flexible Descriptors in the queue context which
771 * allows this driver to select a specific receive descriptor format
773 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
774 QRXFLXP_CNTXT_RXDID_IDX_M;
776 /* increasing context priority to pick up profile ID;
777 * default is 0x01; setting to 0x03 to ensure profile
778 * is programmed if the previous context is of the same priority
780 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
781 QRXFLXP_CNTXT_RXDID_PRIO_M;
783 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
785 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
787 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
791 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
793 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
798 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
800 /* Init the Rx tail register*/
801 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
807 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
809 struct ice_rx_queue *rxq;
811 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
812 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
814 PMD_INIT_FUNC_TRACE();
817 if (!rxq || !rxq->q_set) {
818 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
823 err = ice_fdir_program_hw_rx_queue(rxq);
825 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
830 /* Init the RX tail register. */
831 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
833 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
835 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
838 ice_reset_rx_queue(rxq);
846 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
848 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
849 struct ice_tx_queue *txq;
853 struct ice_aqc_add_tx_qgrp *txq_elem;
854 struct ice_tlan_ctx tx_ctx;
857 PMD_INIT_FUNC_TRACE();
860 if (!txq || !txq->q_set) {
861 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
866 buf_len = ice_struct_size(txq_elem, txqs, 1);
867 txq_elem = ice_malloc(hw, buf_len);
872 hw = ICE_VSI_TO_HW(vsi);
874 memset(&tx_ctx, 0, sizeof(tx_ctx));
875 txq_elem->num_txqs = 1;
876 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
878 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
879 tx_ctx.qlen = txq->nb_tx_desc;
880 tx_ctx.pf_num = hw->pf_id;
881 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
882 tx_ctx.src_vsi = vsi->vsi_id;
883 tx_ctx.port_num = hw->port_info->lport;
884 tx_ctx.tso_ena = 1; /* tso enable */
885 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
886 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
888 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
891 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
893 /* Init the Tx tail register*/
894 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
896 /* FIXME: we assume TC is always 0 here */
897 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
898 txq_elem, buf_len, NULL);
900 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
904 /* store the schedule node id */
905 txq->q_teid = txq_elem->txqs[0].q_teid;
911 /* Free all mbufs for descriptors in tx queue */
913 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
917 if (!txq || !txq->sw_ring) {
918 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
922 for (i = 0; i < txq->nb_tx_desc; i++) {
923 if (txq->sw_ring[i].mbuf) {
924 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
925 txq->sw_ring[i].mbuf = NULL;
931 ice_reset_tx_queue(struct ice_tx_queue *txq)
933 struct ice_tx_entry *txe;
934 uint16_t i, prev, size;
937 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
942 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
943 for (i = 0; i < size; i++)
944 ((volatile char *)txq->tx_ring)[i] = 0;
946 prev = (uint16_t)(txq->nb_tx_desc - 1);
947 for (i = 0; i < txq->nb_tx_desc; i++) {
948 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
950 txd->cmd_type_offset_bsz =
951 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
954 txe[prev].next_id = i;
958 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
959 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
964 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
965 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
969 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
971 struct ice_tx_queue *txq;
972 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
973 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
974 struct ice_vsi *vsi = pf->main_vsi;
975 enum ice_status status;
978 uint16_t q_handle = tx_queue_id;
980 if (tx_queue_id >= dev->data->nb_tx_queues) {
981 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
982 tx_queue_id, dev->data->nb_tx_queues);
986 txq = dev->data->tx_queues[tx_queue_id];
988 PMD_DRV_LOG(ERR, "TX queue %u is not available",
993 q_ids[0] = txq->reg_idx;
994 q_teids[0] = txq->q_teid;
996 /* FIXME: we assume TC is always 0 here */
997 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
998 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
999 if (status != ICE_SUCCESS) {
1000 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1004 txq->tx_rel_mbufs(txq);
1005 ice_reset_tx_queue(txq);
1006 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1012 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1014 struct ice_rx_queue *rxq;
1016 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1017 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1021 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1023 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1027 rxq->rx_rel_mbufs(rxq);
1033 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1035 struct ice_tx_queue *txq;
1036 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1037 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1038 struct ice_vsi *vsi = pf->main_vsi;
1039 enum ice_status status;
1041 uint32_t q_teids[1];
1042 uint16_t q_handle = tx_queue_id;
1046 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1052 q_ids[0] = txq->reg_idx;
1053 q_teids[0] = txq->q_teid;
1055 /* FIXME: we assume TC is always 0 here */
1056 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1057 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1058 if (status != ICE_SUCCESS) {
1059 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1063 txq->tx_rel_mbufs(txq);
1069 ice_rx_queue_setup(struct rte_eth_dev *dev,
1072 unsigned int socket_id,
1073 const struct rte_eth_rxconf *rx_conf,
1074 struct rte_mempool *mp)
1076 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1077 struct ice_adapter *ad =
1078 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1079 struct ice_vsi *vsi = pf->main_vsi;
1080 struct ice_rx_queue *rxq;
1081 const struct rte_memzone *rz;
1084 int use_def_burst_func = 1;
1087 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1088 nb_desc > ICE_MAX_RING_DESC ||
1089 nb_desc < ICE_MIN_RING_DESC) {
1090 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1091 "invalid", nb_desc);
1095 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1097 /* Free memory if needed */
1098 if (dev->data->rx_queues[queue_idx]) {
1099 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1100 dev->data->rx_queues[queue_idx] = NULL;
1103 /* Allocate the rx queue data structure */
1104 rxq = rte_zmalloc_socket(NULL,
1105 sizeof(struct ice_rx_queue),
1106 RTE_CACHE_LINE_SIZE,
1109 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1110 "rx queue data structure");
1114 rxq->nb_rx_desc = nb_desc;
1115 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1116 rxq->queue_id = queue_idx;
1117 rxq->offloads = offloads;
1119 rxq->reg_idx = vsi->base_queue + queue_idx;
1120 rxq->port_id = dev->data->port_id;
1121 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1122 rxq->crc_len = RTE_ETHER_CRC_LEN;
1126 rxq->drop_en = rx_conf->rx_drop_en;
1128 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1129 rxq->proto_xtr = pf->proto_xtr != NULL ?
1130 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1132 /* Allocate the maximum number of RX ring hardware descriptors. */
1133 len = ICE_MAX_RING_DESC;
1136 * Allocating a little more memory because vectorized/bulk_alloc Rx
1137 * functions don't check boundaries each time.
1139 len += ICE_RX_MAX_BURST;
1141 /* Allocate the maximum number of RX ring hardware descriptors. */
1142 ring_size = sizeof(union ice_rx_flex_desc) * len;
1143 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1144 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1145 ring_size, ICE_RING_BASE_ALIGN,
1148 ice_rx_queue_release(rxq);
1149 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1153 /* Zero all the descriptors in the ring. */
1154 memset(rz->addr, 0, ring_size);
1156 rxq->rx_ring_dma = rz->iova;
1157 rxq->rx_ring = rz->addr;
1159 /* always reserve more for bulk alloc */
1160 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1162 /* Allocate the software ring. */
1163 rxq->sw_ring = rte_zmalloc_socket(NULL,
1164 sizeof(struct ice_rx_entry) * len,
1165 RTE_CACHE_LINE_SIZE,
1167 if (!rxq->sw_ring) {
1168 ice_rx_queue_release(rxq);
1169 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1173 ice_reset_rx_queue(rxq);
1175 dev->data->rx_queues[queue_idx] = rxq;
1176 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1178 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
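/*
 * The bulk-allocation Rx path is only used when rx_free_thresh is at least
 * ICE_RX_MAX_BURST, strictly smaller than nb_rx_desc, and an exact divisor
 * of it. For example (illustrative), nb_desc = 1024 with rx_free_thresh =
 * 64 satisfies all three preconditions as long as ICE_RX_MAX_BURST does
 * not exceed 64.
 */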
1180 if (!use_def_burst_func) {
1181 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1182 "satisfied. Rx Burst Bulk Alloc function will be "
1183 "used on port=%d, queue=%d.",
1184 rxq->port_id, rxq->queue_id);
1186 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1187 "not satisfied, Scattered Rx is requested. "
1188 "on port=%d, queue=%d.",
1189 rxq->port_id, rxq->queue_id);
1190 ad->rx_bulk_alloc_allowed = false;
1197 ice_rx_queue_release(void *rxq)
1199 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1202 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1207 rte_free(q->sw_ring);
1212 ice_tx_queue_setup(struct rte_eth_dev *dev,
1215 unsigned int socket_id,
1216 const struct rte_eth_txconf *tx_conf)
1218 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1219 struct ice_vsi *vsi = pf->main_vsi;
1220 struct ice_tx_queue *txq;
1221 const struct rte_memzone *tz;
1223 uint16_t tx_rs_thresh, tx_free_thresh;
1226 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1228 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1229 nb_desc > ICE_MAX_RING_DESC ||
1230 nb_desc < ICE_MIN_RING_DESC) {
1231 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1232 "invalid", nb_desc);
1237 * The following two parameters control the setting of the RS bit on
1238 * transmit descriptors. TX descriptors will have their RS bit set
1239 * after txq->tx_rs_thresh descriptors have been used. The TX
1240 * descriptor ring will be cleaned after txq->tx_free_thresh
1241 * descriptors are used or if the number of descriptors required to
1242 * transmit a packet is greater than the number of free TX descriptors.
1244 * The following constraints must be satisfied:
1245 * - tx_rs_thresh must be greater than 0.
1246 * - tx_rs_thresh must be less than the size of the ring minus 2.
1247 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1248 * - tx_rs_thresh must be a divisor of the ring size.
1249 * - tx_free_thresh must be greater than 0.
1250 * - tx_free_thresh must be less than the size of the ring minus 3.
1251 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1253 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1254 * race condition, hence the maximum threshold constraints. When set
1255 * to zero use default values.
1257 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1258 tx_conf->tx_free_thresh :
1259 ICE_DEFAULT_TX_FREE_THRESH);
1260 /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1262 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1263 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1264 if (tx_conf->tx_rs_thresh)
1265 tx_rs_thresh = tx_conf->tx_rs_thresh;
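/*
 * Worked example (illustrative figures): with nb_desc = 1024,
 * tx_free_thresh = 32 and tx_rs_thresh = 32, the checks below pass because
 * 32 + 32 <= 1024, 32 < 1022, 32 < 1021, 32 <= 32 and 1024 % 32 == 0,
 * provided wthresh is left at 0.
 */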
1266 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1267 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1268 "exceed nb_desc. (tx_rs_thresh=%u "
1269 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1270 (unsigned int)tx_rs_thresh,
1271 (unsigned int)tx_free_thresh,
1272 (unsigned int)nb_desc,
1273 (int)dev->data->port_id,
1277 if (tx_rs_thresh >= (nb_desc - 2)) {
1278 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1279 "number of TX descriptors minus 2. "
1280 "(tx_rs_thresh=%u port=%d queue=%d)",
1281 (unsigned int)tx_rs_thresh,
1282 (int)dev->data->port_id,
1286 if (tx_free_thresh >= (nb_desc - 3)) {
1287 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1288 "tx_free_thresh must be less than the "
1289 "number of TX descriptors minus 3. "
1290 "(tx_free_thresh=%u port=%d queue=%d)",
1291 (unsigned int)tx_free_thresh,
1292 (int)dev->data->port_id,
1296 if (tx_rs_thresh > tx_free_thresh) {
1297 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1298 "equal to tx_free_thresh. (tx_free_thresh=%u"
1299 " tx_rs_thresh=%u port=%d queue=%d)",
1300 (unsigned int)tx_free_thresh,
1301 (unsigned int)tx_rs_thresh,
1302 (int)dev->data->port_id,
1306 if ((nb_desc % tx_rs_thresh) != 0) {
1307 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1308 "number of TX descriptors. (tx_rs_thresh=%u"
1309 " port=%d queue=%d)",
1310 (unsigned int)tx_rs_thresh,
1311 (int)dev->data->port_id,
1315 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1316 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1317 "tx_rs_thresh is greater than 1. "
1318 "(tx_rs_thresh=%u port=%d queue=%d)",
1319 (unsigned int)tx_rs_thresh,
1320 (int)dev->data->port_id,
1325 /* Free memory if needed. */
1326 if (dev->data->tx_queues[queue_idx]) {
1327 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1328 dev->data->tx_queues[queue_idx] = NULL;
1331 /* Allocate the TX queue data structure. */
1332 txq = rte_zmalloc_socket(NULL,
1333 sizeof(struct ice_tx_queue),
1334 RTE_CACHE_LINE_SIZE,
1337 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1338 "tx queue structure");
1342 /* Allocate TX hardware ring descriptors. */
1343 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1344 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1345 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1346 ring_size, ICE_RING_BASE_ALIGN,
1349 ice_tx_queue_release(txq);
1350 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1354 txq->nb_tx_desc = nb_desc;
1355 txq->tx_rs_thresh = tx_rs_thresh;
1356 txq->tx_free_thresh = tx_free_thresh;
1357 txq->pthresh = tx_conf->tx_thresh.pthresh;
1358 txq->hthresh = tx_conf->tx_thresh.hthresh;
1359 txq->wthresh = tx_conf->tx_thresh.wthresh;
1360 txq->queue_id = queue_idx;
1362 txq->reg_idx = vsi->base_queue + queue_idx;
1363 txq->port_id = dev->data->port_id;
1364 txq->offloads = offloads;
1366 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1368 txq->tx_ring_dma = tz->iova;
1369 txq->tx_ring = tz->addr;
1371 /* Allocate software ring */
1373 rte_zmalloc_socket(NULL,
1374 sizeof(struct ice_tx_entry) * nb_desc,
1375 RTE_CACHE_LINE_SIZE,
1377 if (!txq->sw_ring) {
1378 ice_tx_queue_release(txq);
1379 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1383 ice_reset_tx_queue(txq);
1385 dev->data->tx_queues[queue_idx] = txq;
1386 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1387 ice_set_tx_function_flag(dev, txq);
1393 ice_tx_queue_release(void *txq)
1395 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1398 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1403 rte_free(q->sw_ring);
1408 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1409 struct rte_eth_rxq_info *qinfo)
1411 struct ice_rx_queue *rxq;
1413 rxq = dev->data->rx_queues[queue_id];
1415 qinfo->mp = rxq->mp;
1416 qinfo->scattered_rx = dev->data->scattered_rx;
1417 qinfo->nb_desc = rxq->nb_rx_desc;
1419 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1420 qinfo->conf.rx_drop_en = rxq->drop_en;
1421 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1425 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1426 struct rte_eth_txq_info *qinfo)
1428 struct ice_tx_queue *txq;
1430 txq = dev->data->tx_queues[queue_id];
1432 qinfo->nb_desc = txq->nb_tx_desc;
1434 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1435 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1436 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1438 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1439 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1440 qinfo->conf.offloads = txq->offloads;
1441 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1445 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1447 #define ICE_RXQ_SCAN_INTERVAL 4
1448 volatile union ice_rx_flex_desc *rxdp;
1449 struct ice_rx_queue *rxq;
1452 rxq = dev->data->rx_queues[rx_queue_id];
1453 rxdp = &rxq->rx_ring[rxq->rx_tail];
1454 while ((desc < rxq->nb_rx_desc) &&
1455 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1456 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1458 * Check the DD bit of one Rx descriptor in every group of 4,
1459 * to avoid checking too frequently and degrading performance
1462 desc += ICE_RXQ_SCAN_INTERVAL;
1463 rxdp += ICE_RXQ_SCAN_INTERVAL;
1464 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1465 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1466 desc - rxq->nb_rx_desc]);
1472 #define ICE_RX_FLEX_ERR0_BITS \
1473 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1474 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1475 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1476 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1477 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1478 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1480 /* Rx L3/L4 checksum */
1481 static inline uint64_t
1482 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1486 /* check if HW has decoded the packet and checksum */
1487 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1490 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1491 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1495 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1496 flags |= PKT_RX_IP_CKSUM_BAD;
1498 flags |= PKT_RX_IP_CKSUM_GOOD;
1500 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1501 flags |= PKT_RX_L4_CKSUM_BAD;
1503 flags |= PKT_RX_L4_CKSUM_GOOD;
1505 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1506 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1508 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1509 flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1511 flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1517 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1519 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1520 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1521 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1523 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1524 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1525 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1530 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1531 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1532 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1533 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1534 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1535 mb->vlan_tci_outer = mb->vlan_tci;
1536 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1537 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1538 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1539 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1541 mb->vlan_tci_outer = 0;
1544 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1545 mb->vlan_tci, mb->vlan_tci_outer);
1548 #define ICE_LOOK_AHEAD 8
1549 #if (ICE_LOOK_AHEAD != 8)
1550 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1553 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1555 volatile union ice_rx_flex_desc *rxdp;
1556 struct ice_rx_entry *rxep;
1557 struct rte_mbuf *mb;
1560 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1561 int32_t i, j, nb_rx = 0;
1562 uint64_t pkt_flags = 0;
1563 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1564 struct ice_vsi *vsi = rxq->vsi;
1565 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1568 rxdp = &rxq->rx_ring[rxq->rx_tail];
1569 rxep = &rxq->sw_ring[rxq->rx_tail];
1571 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1573 /* Make sure there is at least 1 packet to receive */
1574 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1578 * Scan LOOK_AHEAD descriptors at a time to determine which
1579 * descriptors reference packets that are ready to be received.
1581 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1582 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1583 /* Read desc statuses backwards to avoid race condition */
1584 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1585 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1589 /* Compute how many status bits were set */
1590 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1591 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
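/*
 * Summing the masked DD flags counts the completed descriptors in this
 * group of ICE_LOOK_AHEAD (the DD flag sits in bit 0 of status_error0, so
 * each set flag contributes exactly 1 to nb_dd).
 */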
1595 /* Translate descriptor info to mbuf parameters */
1596 for (j = 0; j < nb_dd; j++) {
1598 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1599 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1600 mb->data_len = pkt_len;
1601 mb->pkt_len = pkt_len;
1603 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1604 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1605 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1606 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1607 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1608 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1610 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1611 ts_ns = ice_tstamp_convert_32b_64b(hw,
1612 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
1613 if (ice_timestamp_dynflag > 0) {
1614 *RTE_MBUF_DYNFIELD(mb,
1615 ice_timestamp_dynfield_offset,
1616 rte_mbuf_timestamp_t *) = ts_ns;
1617 mb->ol_flags |= ice_timestamp_dynflag;
1621 mb->ol_flags |= pkt_flags;
1624 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1625 rxq->rx_stage[i + j] = rxep[j].mbuf;
1627 if (nb_dd != ICE_LOOK_AHEAD)
1631 /* Clear software ring entries */
1632 for (i = 0; i < nb_rx; i++)
1633 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1635 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1636 "port_id=%u, queue_id=%u, nb_rx=%d",
1637 rxq->port_id, rxq->queue_id, nb_rx);
1642 static inline uint16_t
1643 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1644 struct rte_mbuf **rx_pkts,
1648 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1650 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1652 for (i = 0; i < nb_pkts; i++)
1653 rx_pkts[i] = stage[i];
1655 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1656 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1662 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1664 volatile union ice_rx_flex_desc *rxdp;
1665 struct ice_rx_entry *rxep;
1666 struct rte_mbuf *mb;
1667 uint16_t alloc_idx, i;
1671 /* Allocate buffers in bulk */
1672 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1673 (rxq->rx_free_thresh - 1));
1674 rxep = &rxq->sw_ring[alloc_idx];
1675 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1676 rxq->rx_free_thresh);
1677 if (unlikely(diag != 0)) {
1678 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1682 rxdp = &rxq->rx_ring[alloc_idx];
1683 for (i = 0; i < rxq->rx_free_thresh; i++) {
1684 if (likely(i < (rxq->rx_free_thresh - 1)))
1685 /* Prefetch next mbuf */
1686 rte_prefetch0(rxep[i + 1].mbuf);
1689 rte_mbuf_refcnt_set(mb, 1);
1691 mb->data_off = RTE_PKTMBUF_HEADROOM;
1693 mb->port = rxq->port_id;
1694 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1695 rxdp[i].read.hdr_addr = 0;
1696 rxdp[i].read.pkt_addr = dma_addr;
1699 /* Update Rx tail register */
1700 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1702 rxq->rx_free_trigger =
1703 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1704 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1705 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1710 static inline uint16_t
1711 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1713 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1719 if (rxq->rx_nb_avail)
1720 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1722 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1723 rxq->rx_next_avail = 0;
1724 rxq->rx_nb_avail = nb_rx;
1725 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1727 if (rxq->rx_tail > rxq->rx_free_trigger) {
1728 if (ice_rx_alloc_bufs(rxq) != 0) {
1731 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1732 rxq->rx_free_thresh;
1733 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1734 "port_id=%u, queue_id=%u",
1735 rxq->port_id, rxq->queue_id);
1736 rxq->rx_nb_avail = 0;
1737 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1738 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1739 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1745 if (rxq->rx_tail >= rxq->nb_rx_desc)
1748 if (rxq->rx_nb_avail)
1749 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1755 ice_recv_pkts_bulk_alloc(void *rx_queue,
1756 struct rte_mbuf **rx_pkts,
1763 if (unlikely(nb_pkts == 0))
1766 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1767 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1770 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1771 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1772 nb_rx = (uint16_t)(nb_rx + count);
1773 nb_pkts = (uint16_t)(nb_pkts - count);
1782 ice_recv_scattered_pkts(void *rx_queue,
1783 struct rte_mbuf **rx_pkts,
1786 struct ice_rx_queue *rxq = rx_queue;
1787 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1788 volatile union ice_rx_flex_desc *rxdp;
1789 union ice_rx_flex_desc rxd;
1790 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1791 struct ice_rx_entry *rxe;
1792 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1793 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1794 struct rte_mbuf *nmb; /* new allocated mbuf */
1795 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1796 uint16_t rx_id = rxq->rx_tail;
1798 uint16_t nb_hold = 0;
1799 uint16_t rx_packet_len;
1800 uint16_t rx_stat_err0;
1803 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1804 struct ice_vsi *vsi = rxq->vsi;
1805 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1808 while (nb_rx < nb_pkts) {
1809 rxdp = &rx_ring[rx_id];
1810 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1812 /* Check the DD bit first */
1813 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1817 nmb = rte_mbuf_raw_alloc(rxq->mp);
1818 if (unlikely(!nmb)) {
1819 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
1822 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1825 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1827 if (unlikely(rx_id == rxq->nb_rx_desc))
1830 /* Prefetch next mbuf */
1831 rte_prefetch0(sw_ring[rx_id].mbuf);
1834 * When the next RX descriptor is on a cache line boundary,
1835 * prefetch the next 4 RX descriptors and next 8 pointers
1838 if ((rx_id & 0x3) == 0) {
1839 rte_prefetch0(&rx_ring[rx_id]);
1840 rte_prefetch0(&sw_ring[rx_id]);
1846 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1848 /* Set data buffer address and data length of the mbuf */
1849 rxdp->read.hdr_addr = 0;
1850 rxdp->read.pkt_addr = dma_addr;
1851 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1852 ICE_RX_FLX_DESC_PKT_LEN_M;
1853 rxm->data_len = rx_packet_len;
1854 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1857 * If this is the first buffer of the received packet, set the
1858 * pointer to the first mbuf of the packet and initialize its
1859 * context. Otherwise, update the total length and the number
1860 * of segments of the current scattered packet, and update the
1861 * pointer to the last mbuf of the current packet.
1865 first_seg->nb_segs = 1;
1866 first_seg->pkt_len = rx_packet_len;
1868 first_seg->pkt_len =
1869 (uint16_t)(first_seg->pkt_len +
1871 first_seg->nb_segs++;
1872 last_seg->next = rxm;
1876 * If this is not the last buffer of the received packet,
1877 * update the pointer to the last mbuf of the current scattered
1878 * packet and continue to parse the RX ring.
1880 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1886 * This is the last buffer of the received packet. If the CRC
1887 * is not stripped by the hardware:
1888 * - Subtract the CRC length from the total packet length.
1889 * - If the last buffer only contains the whole CRC or a part
1890 * of it, free the mbuf associated to the last buffer. If part
1891 * of the CRC is also contained in the previous mbuf, subtract
1892 * the length of that CRC part from the data length of the
1896 if (unlikely(rxq->crc_len > 0)) {
1897 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1898 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1899 rte_pktmbuf_free_seg(rxm);
1900 first_seg->nb_segs--;
1901 last_seg->data_len =
1902 (uint16_t)(last_seg->data_len -
1903 (RTE_ETHER_CRC_LEN - rx_packet_len));
1904 last_seg->next = NULL;
1906 rxm->data_len = (uint16_t)(rx_packet_len -
1910 first_seg->port = rxq->port_id;
1911 first_seg->ol_flags = 0;
1912 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1913 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1914 ice_rxd_to_vlan_tci(first_seg, &rxd);
1915 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1916 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1918 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1919 ts_ns = ice_tstamp_convert_32b_64b(hw,
1920 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1921 if (ice_timestamp_dynflag > 0) {
1922 *RTE_MBUF_DYNFIELD(first_seg,
1923 ice_timestamp_dynfield_offset,
1924 rte_mbuf_timestamp_t *) = ts_ns;
1925 first_seg->ol_flags |= ice_timestamp_dynflag;
1929 first_seg->ol_flags |= pkt_flags;
1930 /* Prefetch data of first segment, if configured to do so. */
1931 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1932 first_seg->data_off));
1933 rx_pkts[nb_rx++] = first_seg;
1937 /* Record index of the next RX descriptor to probe. */
1938 rxq->rx_tail = rx_id;
1939 rxq->pkt_first_seg = first_seg;
1940 rxq->pkt_last_seg = last_seg;
1943 * If the number of free RX descriptors is greater than the RX free
1944 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1945 * register. Update the RDT with the value of the last processed RX
1946 * descriptor minus 1, to guarantee that the RDT register is never
1947 * equal to the RDH register, which creates a "full" ring situation
1948 * from the hardware point of view.
1950 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1951 if (nb_hold > rxq->rx_free_thresh) {
1952 rx_id = (uint16_t)(rx_id == 0 ?
1953 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1954 /* write TAIL register */
1955 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1958 rxq->nb_rx_hold = nb_hold;
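/*
 * nb_rx_hold batches the tail updates: the register is rewritten only once
 * more than rx_free_thresh processed descriptors have accumulated, which
 * amortizes the MMIO write over many packets (e.g. roughly one doorbell
 * per 32+ packets with a free threshold of 32, illustrative value).
 */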
1960 /* return received packet in the burst */
1965 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1967 struct ice_adapter *ad =
1968 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1969 const uint32_t *ptypes;
1971 static const uint32_t ptypes_os[] = {
1972 /* refers to ice_get_default_pkt_type() */
1974 RTE_PTYPE_L2_ETHER_TIMESYNC,
1975 RTE_PTYPE_L2_ETHER_LLDP,
1976 RTE_PTYPE_L2_ETHER_ARP,
1977 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1978 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1981 RTE_PTYPE_L4_NONFRAG,
1985 RTE_PTYPE_TUNNEL_GRENAT,
1986 RTE_PTYPE_TUNNEL_IP,
1987 RTE_PTYPE_INNER_L2_ETHER,
1988 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1989 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1990 RTE_PTYPE_INNER_L4_FRAG,
1991 RTE_PTYPE_INNER_L4_ICMP,
1992 RTE_PTYPE_INNER_L4_NONFRAG,
1993 RTE_PTYPE_INNER_L4_SCTP,
1994 RTE_PTYPE_INNER_L4_TCP,
1995 RTE_PTYPE_INNER_L4_UDP,
1999 static const uint32_t ptypes_comms[] = {
2000 /* refers to ice_get_default_pkt_type() */
2002 RTE_PTYPE_L2_ETHER_TIMESYNC,
2003 RTE_PTYPE_L2_ETHER_LLDP,
2004 RTE_PTYPE_L2_ETHER_ARP,
2005 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2006 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2009 RTE_PTYPE_L4_NONFRAG,
2013 RTE_PTYPE_TUNNEL_GRENAT,
2014 RTE_PTYPE_TUNNEL_IP,
2015 RTE_PTYPE_INNER_L2_ETHER,
2016 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2017 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2018 RTE_PTYPE_INNER_L4_FRAG,
2019 RTE_PTYPE_INNER_L4_ICMP,
2020 RTE_PTYPE_INNER_L4_NONFRAG,
2021 RTE_PTYPE_INNER_L4_SCTP,
2022 RTE_PTYPE_INNER_L4_TCP,
2023 RTE_PTYPE_INNER_L4_UDP,
2024 RTE_PTYPE_TUNNEL_GTPC,
2025 RTE_PTYPE_TUNNEL_GTPU,
2026 RTE_PTYPE_L2_ETHER_PPPOE,
2030 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
2031 ptypes = ptypes_comms;
2035 if (dev->rx_pkt_burst == ice_recv_pkts ||
2036 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2037 dev->rx_pkt_burst == ice_recv_scattered_pkts)
2041 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2042 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2043 #ifdef CC_AVX512_SUPPORT
2044 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2045 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2046 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2047 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2049 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2050 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2051 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2052 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2060 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2062 volatile union ice_rx_flex_desc *rxdp;
2063 struct ice_rx_queue *rxq = rx_queue;
2066 if (unlikely(offset >= rxq->nb_rx_desc))
2069 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2070 return RTE_ETH_RX_DESC_UNAVAIL;
2072 desc = rxq->rx_tail + offset;
2073 if (desc >= rxq->nb_rx_desc)
2074 desc -= rxq->nb_rx_desc;
2076 rxdp = &rxq->rx_ring[desc];
2077 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2078 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2079 return RTE_ETH_RX_DESC_DONE;
2081 return RTE_ETH_RX_DESC_AVAIL;
2085 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2087 struct ice_tx_queue *txq = tx_queue;
2088 volatile uint64_t *status;
2089 uint64_t mask, expect;
2092 if (unlikely(offset >= txq->nb_tx_desc))
2095 desc = txq->tx_tail + offset;
2096 /* go to next desc that has the RS bit */
2097 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2099 if (desc >= txq->nb_tx_desc) {
2100 desc -= txq->nb_tx_desc;
2101 if (desc >= txq->nb_tx_desc)
2102 desc -= txq->nb_tx_desc;
2105 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2106 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2107 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2108 ICE_TXD_QW1_DTYPE_S);
2109 if ((*status & mask) == expect)
2110 return RTE_ETH_TX_DESC_DONE;
2112 return RTE_ETH_TX_DESC_FULL;
2116 ice_free_queues(struct rte_eth_dev *dev)
2120 PMD_INIT_FUNC_TRACE();
2122 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2123 if (!dev->data->rx_queues[i])
2125 ice_rx_queue_release(dev->data->rx_queues[i]);
2126 dev->data->rx_queues[i] = NULL;
2127 rte_eth_dma_zone_free(dev, "rx_ring", i);
2129 dev->data->nb_rx_queues = 0;
2131 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2132 if (!dev->data->tx_queues[i])
2134 ice_tx_queue_release(dev->data->tx_queues[i]);
2135 dev->data->tx_queues[i] = NULL;
2136 rte_eth_dma_zone_free(dev, "tx_ring", i);
2138 dev->data->nb_tx_queues = 0;
2141 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2142 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
2145 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2147 struct ice_tx_queue *txq;
2148 const struct rte_memzone *tz = NULL;
2150 struct rte_eth_dev *dev;
2153 PMD_DRV_LOG(ERR, "PF is not available");
2157 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2159 /* Allocate the TX queue data structure. */
2160 txq = rte_zmalloc_socket("ice fdir tx queue",
2161 sizeof(struct ice_tx_queue),
2162 RTE_CACHE_LINE_SIZE,
2165 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2166 "tx queue structure.");
2170 /* Allocate TX hardware ring descriptors. */
2171 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2172 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2174 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2175 ICE_FDIR_QUEUE_ID, ring_size,
2176 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2178 ice_tx_queue_release(txq);
2179 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2183 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2184 txq->queue_id = ICE_FDIR_QUEUE_ID;
2185 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2186 txq->vsi = pf->fdir.fdir_vsi;
2188 txq->tx_ring_dma = tz->iova;
2189 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2191 * There is no need to allocate a software ring or reset the FDIR
2192 * programming queue; just mark the queue as configured.
2197 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2203 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2205 struct ice_rx_queue *rxq;
2206 const struct rte_memzone *rz = NULL;
2208 struct rte_eth_dev *dev;
2211 PMD_DRV_LOG(ERR, "PF is not available");
2215 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2217 /* Allocate the RX queue data structure. */
2218 rxq = rte_zmalloc_socket("ice fdir rx queue",
2219 sizeof(struct ice_rx_queue),
2220 RTE_CACHE_LINE_SIZE,
2223 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2224 "rx queue structure.");
2228 /* Allocate RX hardware ring descriptors. */
2229 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2230 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2232 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2233 ICE_FDIR_QUEUE_ID, ring_size,
2234 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2236 ice_rx_queue_release(rxq);
2237 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2241 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2242 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2243 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2244 rxq->vsi = pf->fdir.fdir_vsi;
2246 rxq->rx_ring_dma = rz->iova;
2247 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2248 sizeof(union ice_32byte_rx_desc));
2249 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
* No need to allocate a software ring or to reset for the FDIR
* Rx queue; just mark the queue as configured.
2258 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2264 ice_recv_pkts(void *rx_queue,
2265 struct rte_mbuf **rx_pkts,
2268 struct ice_rx_queue *rxq = rx_queue;
2269 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2270 volatile union ice_rx_flex_desc *rxdp;
2271 union ice_rx_flex_desc rxd;
2272 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2273 struct ice_rx_entry *rxe;
struct rte_mbuf *nmb; /* newly allocated mbuf */
2275 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2276 uint16_t rx_id = rxq->rx_tail;
2278 uint16_t nb_hold = 0;
2279 uint16_t rx_packet_len;
2280 uint16_t rx_stat_err0;
2283 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2284 struct ice_vsi *vsi = rxq->vsi;
2285 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2288 while (nb_rx < nb_pkts) {
2289 rxdp = &rx_ring[rx_id];
2290 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2292 /* Check the DD bit first */
2293 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2297 nmb = rte_mbuf_raw_alloc(rxq->mp);
2298 if (unlikely(!nmb)) {
2299 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2302 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2305 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2307 if (unlikely(rx_id == rxq->nb_rx_desc))
2312 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
* fill the read format of the descriptor with the physical address
* of the newly allocated mbuf: nmb
2318 rxdp->read.hdr_addr = 0;
2319 rxdp->read.pkt_addr = dma_addr;
2321 /* calculate rx_packet_len of the received pkt */
2322 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2323 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2325 /* fill old mbuf with received descriptor: rxd */
2326 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2327 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2330 rxm->pkt_len = rx_packet_len;
2331 rxm->data_len = rx_packet_len;
2332 rxm->port = rxq->port_id;
2333 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2334 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2335 ice_rxd_to_vlan_tci(rxm, &rxd);
2336 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2337 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
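/*
 * When the Rx timestamp offload is enabled, extend the 32-bit hardware
 * timestamp to 64 bits and store it in the mbuf dynamic timestamp
 * field, marking its presence with the timestamp dynamic flag.
 */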
2339 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
2340 ts_ns = ice_tstamp_convert_32b_64b(hw,
2341 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
2342 if (ice_timestamp_dynflag > 0) {
2343 *RTE_MBUF_DYNFIELD(rxm,
2344 ice_timestamp_dynfield_offset,
2345 rte_mbuf_timestamp_t *) = ts_ns;
2346 rxm->ol_flags |= ice_timestamp_dynflag;
2350 rxm->ol_flags |= pkt_flags;
2351 /* copy old mbuf to rx_pkts */
2352 rx_pkts[nb_rx++] = rxm;
2354 rxq->rx_tail = rx_id;
2356 * If the number of free RX descriptors is greater than the RX free
* threshold of the queue, advance the receive tail register of the queue.
2358 * Update that register with the value of the last processed RX
2359 * descriptor minus 1.
2361 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2362 if (nb_hold > rxq->rx_free_thresh) {
2363 rx_id = (uint16_t)(rx_id == 0 ?
2364 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2365 /* write TAIL register */
2366 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2369 rxq->nb_rx_hold = nb_hold;
2371 /* return received packet in the burst */
2376 ice_parse_tunneling_params(uint64_t ol_flags,
2377 union ice_tx_offload tx_offload,
2378 uint32_t *cd_tunneling)
2380 /* EIPT: External (outer) IP header type */
2381 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2382 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2383 else if (ol_flags & PKT_TX_OUTER_IPV4)
2384 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2385 else if (ol_flags & PKT_TX_OUTER_IPV6)
2386 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2388 /* EIPLEN: External (outer) IP header length, in DWords */
2389 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2390 ICE_TXD_CTX_QW0_EIPLEN_S;
2392 /* L4TUNT: L4 Tunneling Type */
2393 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2394 case PKT_TX_TUNNEL_IPIP:
/* for non-UDP/GRE tunneling, set to 00b */
2397 case PKT_TX_TUNNEL_VXLAN:
2398 case PKT_TX_TUNNEL_GTP:
2399 case PKT_TX_TUNNEL_GENEVE:
2400 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2402 case PKT_TX_TUNNEL_GRE:
2403 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2406 PMD_TX_LOG(ERR, "Tunnel type not supported");
2410 /* L4TUNLEN: L4 Tunneling Length, in Words
* We depend on the app to set rte_mbuf.l2_len correctly.
* For IP in GRE it should be set to the length of the GRE header;
* for MAC in GRE or MAC in UDP it should be set to the length
2416 * of the GRE or UDP headers plus the inner MAC up to including
2417 * its last Ethertype.
2418 * If MPLS labels exists, it should include them as well.
2420 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2421 ICE_TXD_CTX_QW0_NATLEN_S;
2424 * Calculate the tunneling UDP checksum.
2425 * Shall be set only if L4TUNT = 01b and EIPT is not zero
2427 if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2428 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2429 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2433 ice_txd_enable_checksum(uint64_t ol_flags,
2435 uint32_t *td_offset,
2436 union ice_tx_offload tx_offload)
2439 if (ol_flags & PKT_TX_TUNNEL_MASK)
2440 *td_offset |= (tx_offload.outer_l2_len >> 1)
2441 << ICE_TX_DESC_LEN_MACLEN_S;
2443 *td_offset |= (tx_offload.l2_len >> 1)
2444 << ICE_TX_DESC_LEN_MACLEN_S;
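/*
 * Header lengths in the Tx descriptor are encoded in hardware units:
 * MACLEN in 2-byte words (hence >> 1), IPLEN and L4LEN in 4-byte
 * double words (hence >> 2).
 */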
2446 /* Enable L3 checksum offloads */
2447 if (ol_flags & PKT_TX_IP_CKSUM) {
2448 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2449 *td_offset |= (tx_offload.l3_len >> 2) <<
2450 ICE_TX_DESC_LEN_IPLEN_S;
2451 } else if (ol_flags & PKT_TX_IPV4) {
2452 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2453 *td_offset |= (tx_offload.l3_len >> 2) <<
2454 ICE_TX_DESC_LEN_IPLEN_S;
2455 } else if (ol_flags & PKT_TX_IPV6) {
2456 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2457 *td_offset |= (tx_offload.l3_len >> 2) <<
2458 ICE_TX_DESC_LEN_IPLEN_S;
2461 if (ol_flags & PKT_TX_TCP_SEG) {
2462 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2463 *td_offset |= (tx_offload.l4_len >> 2) <<
2464 ICE_TX_DESC_LEN_L4_LEN_S;
2468 /* Enable L4 checksum offloads */
2469 switch (ol_flags & PKT_TX_L4_MASK) {
2470 case PKT_TX_TCP_CKSUM:
2471 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2472 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2473 ICE_TX_DESC_LEN_L4_LEN_S;
2475 case PKT_TX_SCTP_CKSUM:
2476 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2477 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2478 ICE_TX_DESC_LEN_L4_LEN_S;
2480 case PKT_TX_UDP_CKSUM:
2481 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2482 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2483 ICE_TX_DESC_LEN_L4_LEN_S;
2491 ice_xmit_cleanup(struct ice_tx_queue *txq)
2493 struct ice_tx_entry *sw_ring = txq->sw_ring;
2494 volatile struct ice_tx_desc *txd = txq->tx_ring;
2495 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2496 uint16_t nb_tx_desc = txq->nb_tx_desc;
2497 uint16_t desc_to_clean_to;
2498 uint16_t nb_tx_to_clean;
2500 /* Determine the last descriptor needing to be cleaned */
2501 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2502 if (desc_to_clean_to >= nb_tx_desc)
2503 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2505 /* Check to make sure the last descriptor to clean is done */
2506 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2507 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2508 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2509 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2510 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2512 txq->port_id, txq->queue_id,
2513 txd[desc_to_clean_to].cmd_type_offset_bsz);
2514 /* Failed to clean any descriptors */
2518 /* Figure out how many descriptors will be cleaned */
2519 if (last_desc_cleaned > desc_to_clean_to)
2520 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2523 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2526 /* The last descriptor to clean is done, so that means all the
2527 * descriptors from the last descriptor that was cleaned
2528 * up to the last descriptor with the RS bit set
2529 * are done. Only reset the threshold descriptor.
2531 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2533 /* Update the txq to reflect the last descriptor that was cleaned */
2534 txq->last_desc_cleaned = desc_to_clean_to;
2535 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2540 /* Construct the tx flags */
2541 static inline uint64_t
2542 ice_build_ctob(uint32_t td_cmd,
2547 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2548 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2549 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2550 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2551 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2554 /* Check if the context descriptor is needed for TX offloading */
2555 static inline uint16_t
2556 ice_calc_context_desc(uint64_t flags)
2558 static uint64_t mask = PKT_TX_TCP_SEG |
2560 PKT_TX_OUTER_IP_CKSUM |
2563 return (flags & mask) ? 1 : 0;
2566 /* set ice TSO context descriptor */
2567 static inline uint64_t
2568 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2570 uint64_t ctx_desc = 0;
2571 uint32_t cd_cmd, hdr_len, cd_tso_len;
2573 if (!tx_offload.l4_len) {
2574 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2578 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2579 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2580 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2582 cd_cmd = ICE_TX_CTX_DESC_TSO;
2583 cd_tso_len = mbuf->pkt_len - hdr_len;
2584 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2585 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2586 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2591 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2592 #define ICE_MAX_DATA_PER_TXD \
2593 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2594 /* Calculate the number of TX descriptors needed for each pkt */
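/*
 * For example, with the 16K-1 byte per-descriptor limit above, a single
 * 32 KB TSO data segment consumes three data descriptors.
 */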
2595 static inline uint16_t
2596 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2598 struct rte_mbuf *txd = tx_pkt;
2601 while (txd != NULL) {
2602 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2610 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2612 struct ice_tx_queue *txq;
2613 volatile struct ice_tx_desc *tx_ring;
2614 volatile struct ice_tx_desc *txd;
2615 struct ice_tx_entry *sw_ring;
2616 struct ice_tx_entry *txe, *txn;
2617 struct rte_mbuf *tx_pkt;
2618 struct rte_mbuf *m_seg;
2619 uint32_t cd_tunneling_params;
2624 uint32_t td_cmd = 0;
2625 uint32_t td_offset = 0;
2626 uint32_t td_tag = 0;
2629 uint64_t buf_dma_addr;
2631 union ice_tx_offload tx_offload = {0};
2634 sw_ring = txq->sw_ring;
2635 tx_ring = txq->tx_ring;
2636 tx_id = txq->tx_tail;
2637 txe = &sw_ring[tx_id];
2639 /* Check if the descriptor ring needs to be cleaned. */
2640 if (txq->nb_tx_free < txq->tx_free_thresh)
2641 (void)ice_xmit_cleanup(txq);
2643 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2644 tx_pkt = *tx_pkts++;
2649 ol_flags = tx_pkt->ol_flags;
2650 tx_offload.l2_len = tx_pkt->l2_len;
2651 tx_offload.l3_len = tx_pkt->l3_len;
2652 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2653 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2654 tx_offload.l4_len = tx_pkt->l4_len;
2655 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2656 /* Calculate the number of context descriptors needed. */
2657 nb_ctx = ice_calc_context_desc(ol_flags);
/* The number of descriptors that must be allocated for
* a packet equals the number of segments of that
* packet plus the number of context descriptors, if needed.
* Recalculate the needed Tx descriptors when TSO is enabled, in case
* the mbuf data size exceeds the max data size that HW allows
2666 if (ol_flags & PKT_TX_TCP_SEG)
2667 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2670 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2671 tx_last = (uint16_t)(tx_id + nb_used - 1);
2674 if (tx_last >= txq->nb_tx_desc)
2675 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2677 if (nb_used > txq->nb_tx_free) {
2678 if (ice_xmit_cleanup(txq) != 0) {
2683 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2684 while (nb_used > txq->nb_tx_free) {
2685 if (ice_xmit_cleanup(txq) != 0) {
2694 /* Descriptor based VLAN insertion */
2695 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2696 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2697 td_tag = tx_pkt->vlan_tci;
2700 /* Fill in tunneling parameters if necessary */
2701 cd_tunneling_params = 0;
2702 if (ol_flags & PKT_TX_TUNNEL_MASK)
2703 ice_parse_tunneling_params(ol_flags, tx_offload,
2704 &cd_tunneling_params);
2706 /* Enable checksum offloading */
2707 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2708 ice_txd_enable_checksum(ol_flags, &td_cmd,
2709 &td_offset, tx_offload);
2712 /* Setup TX context descriptor if required */
2713 volatile struct ice_tx_ctx_desc *ctx_txd =
2714 (volatile struct ice_tx_ctx_desc *)
2716 uint16_t cd_l2tag2 = 0;
2717 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2719 txn = &sw_ring[txe->next_id];
2720 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2722 rte_pktmbuf_free_seg(txe->mbuf);
2726 if (ol_flags & PKT_TX_TCP_SEG)
2727 cd_type_cmd_tso_mss |=
2728 ice_set_tso_ctx(tx_pkt, tx_offload);
2730 ctx_txd->tunneling_params =
2731 rte_cpu_to_le_32(cd_tunneling_params);
2733 /* TX context descriptor based double VLAN insert */
2734 if (ol_flags & PKT_TX_QINQ) {
2735 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2736 cd_type_cmd_tso_mss |=
2737 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2738 ICE_TXD_CTX_QW1_CMD_S);
2740 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2742 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2744 txe->last_id = tx_last;
2745 tx_id = txe->next_id;
2751 txd = &tx_ring[tx_id];
2752 txn = &sw_ring[txe->next_id];
2755 rte_pktmbuf_free_seg(txe->mbuf);
2758 /* Setup TX Descriptor */
2759 slen = m_seg->data_len;
2760 buf_dma_addr = rte_mbuf_data_iova(m_seg);
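/*
 * With TSO, a segment longer than the 16K-1 byte per-descriptor limit
 * is split below: each iteration writes one maximum-sized data
 * descriptor and advances the DMA address and remaining length.
 */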
2762 while ((ol_flags & PKT_TX_TCP_SEG) &&
2763 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2764 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2765 txd->cmd_type_offset_bsz =
2766 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2767 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2768 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2769 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2770 ICE_TXD_QW1_TX_BUF_SZ_S) |
2771 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2773 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2774 slen -= ICE_MAX_DATA_PER_TXD;
2776 txe->last_id = tx_last;
2777 tx_id = txe->next_id;
2779 txd = &tx_ring[tx_id];
2780 txn = &sw_ring[txe->next_id];
2783 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2784 txd->cmd_type_offset_bsz =
2785 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2786 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2787 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2788 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2789 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2791 txe->last_id = tx_last;
2792 tx_id = txe->next_id;
2794 m_seg = m_seg->next;
2797 /* fill the last descriptor with End of Packet (EOP) bit */
2798 td_cmd |= ICE_TX_DESC_CMD_EOP;
2799 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2800 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2802 /* set RS bit on the last descriptor of one packet */
2803 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2805 "Setting RS bit on TXD id="
2806 "%4u (port=%d queue=%d)",
2807 tx_last, txq->port_id, txq->queue_id);
2809 td_cmd |= ICE_TX_DESC_CMD_RS;
2811 /* Update txq RS bit counters */
2812 txq->nb_tx_used = 0;
2814 txd->cmd_type_offset_bsz |=
2815 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2819 /* update Tail register */
2820 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2821 txq->tx_tail = tx_id;
2826 static __rte_always_inline int
2827 ice_tx_free_bufs(struct ice_tx_queue *txq)
2829 struct ice_tx_entry *txep;
2832 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2833 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2834 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2837 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
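/*
 * tx_next_dd is the last descriptor of the completed batch, so the
 * first software ring entry to free sits tx_rs_thresh - 1 slots
 * earlier; prefetch those mbufs before releasing them.
 */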
2839 for (i = 0; i < txq->tx_rs_thresh; i++)
2840 rte_prefetch0((txep + i)->mbuf);
2842 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2843 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2844 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2848 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2849 rte_pktmbuf_free_seg(txep->mbuf);
2854 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2855 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2856 if (txq->tx_next_dd >= txq->nb_tx_desc)
2857 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2859 return txq->tx_rs_thresh;
2863 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2866 struct ice_tx_entry *swr_ring = txq->sw_ring;
2867 uint16_t i, tx_last, tx_id;
2868 uint16_t nb_tx_free_last;
2869 uint16_t nb_tx_to_clean;
/* Start freeing mbufs from the entry after tx_tail */
2873 tx_last = txq->tx_tail;
2874 tx_id = swr_ring[tx_last].next_id;
2876 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2879 nb_tx_to_clean = txq->nb_tx_free;
2880 nb_tx_free_last = txq->nb_tx_free;
2882 free_cnt = txq->nb_tx_desc;
/* Loop through swr_ring to count the number of
* freeable mbufs and packets.
2887 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2888 for (i = 0; i < nb_tx_to_clean &&
2889 pkt_cnt < free_cnt &&
2890 tx_id != tx_last; i++) {
2891 if (swr_ring[tx_id].mbuf != NULL) {
2892 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2893 swr_ring[tx_id].mbuf = NULL;
2896 * last segment in the packet,
2897 * increment packet count
2899 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2902 tx_id = swr_ring[tx_id].next_id;
2905 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2906 txq->nb_tx_free || tx_id == tx_last)
2909 if (pkt_cnt < free_cnt) {
2910 if (ice_xmit_cleanup(txq))
2913 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2914 nb_tx_free_last = txq->nb_tx_free;
2918 return (int)pkt_cnt;
2923 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2924 uint32_t free_cnt __rte_unused)
2931 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2936 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2937 free_cnt = txq->nb_tx_desc;
2939 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2941 for (i = 0; i < cnt; i += n) {
2942 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2945 n = ice_tx_free_bufs(txq);
2955 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2957 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2958 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2959 struct ice_adapter *ad =
2960 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2963 if (ad->tx_vec_allowed)
2964 return ice_tx_done_cleanup_vec(q, free_cnt);
2966 if (ad->tx_simple_allowed)
2967 return ice_tx_done_cleanup_simple(q, free_cnt);
2969 return ice_tx_done_cleanup_full(q, free_cnt);
2972 /* Populate 4 descriptors with data from 4 mbufs */
2974 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2979 for (i = 0; i < 4; i++, txdp++, pkts++) {
2980 dma_addr = rte_mbuf_data_iova(*pkts);
2981 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2982 txdp->cmd_type_offset_bsz =
2983 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2984 (*pkts)->data_len, 0);
2988 /* Populate 1 descriptor with data from 1 mbuf */
2990 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2994 dma_addr = rte_mbuf_data_iova(*pkts);
2995 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2996 txdp->cmd_type_offset_bsz =
2997 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2998 (*pkts)->data_len, 0);
3002 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
3005 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
3006 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3007 const int N_PER_LOOP = 4;
3008 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3009 int mainpart, leftover;
3013 * Process most of the packets in chunks of N pkts. Any
3014 * leftover packets will get processed one at a time.
3016 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3017 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
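/* e.g. nb_pkts = 13 gives mainpart = 12 and leftover = 1 */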
3018 for (i = 0; i < mainpart; i += N_PER_LOOP) {
3019 /* Copy N mbuf pointers to the S/W ring */
3020 for (j = 0; j < N_PER_LOOP; ++j)
3021 (txep + i + j)->mbuf = *(pkts + i + j);
3022 tx4(txdp + i, pkts + i);
3025 if (unlikely(leftover > 0)) {
3026 for (i = 0; i < leftover; ++i) {
3027 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3028 tx1(txdp + mainpart + i, pkts + mainpart + i);
3033 static inline uint16_t
3034 tx_xmit_pkts(struct ice_tx_queue *txq,
3035 struct rte_mbuf **tx_pkts,
3038 volatile struct ice_tx_desc *txr = txq->tx_ring;
3042 * Begin scanning the H/W ring for done descriptors when the number
3043 * of available descriptors drops below tx_free_thresh. For each done
3044 * descriptor, free the associated buffer.
3046 if (txq->nb_tx_free < txq->tx_free_thresh)
3047 ice_tx_free_bufs(txq);
3049 /* Use available descriptor only */
3050 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3051 if (unlikely(!nb_pkts))
3054 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
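/*
 * If the burst wraps past the end of the ring, fill descriptors up to
 * the ring end first and set the pending RS bit, then continue filling
 * from index 0 below.
 */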
3055 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3056 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3057 ice_tx_fill_hw_ring(txq, tx_pkts, n);
3058 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3059 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3061 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3065 /* Fill hardware descriptor ring with mbuf data */
3066 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3067 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
/* Determine if the RS bit needs to be set */
3070 if (txq->tx_tail > txq->tx_next_rs) {
3071 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3072 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3075 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3076 if (txq->tx_next_rs >= txq->nb_tx_desc)
3077 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3080 if (txq->tx_tail >= txq->nb_tx_desc)
3083 /* Update the tx tail register */
3084 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3090 ice_xmit_pkts_simple(void *tx_queue,
3091 struct rte_mbuf **tx_pkts,
3096 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3097 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3101 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3104 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3105 &tx_pkts[nb_tx], num);
3106 nb_tx = (uint16_t)(nb_tx + ret);
3107 nb_pkts = (uint16_t)(nb_pkts - ret);
3116 ice_set_rx_function(struct rte_eth_dev *dev)
3118 PMD_INIT_FUNC_TRACE();
3119 struct ice_adapter *ad =
3120 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3122 struct ice_rx_queue *rxq;
3124 int rx_check_ret = -1;
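/*
 * Select the Rx burst function once in the primary process: prefer the
 * AVX512, then AVX2, then SSE vector paths when the device and queues
 * allow it, otherwise fall back to the scalar scattered, bulk-alloc or
 * normal receive functions.
 */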
3126 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3127 ad->rx_use_avx512 = false;
3128 ad->rx_use_avx2 = false;
3129 rx_check_ret = ice_rx_vec_dev_check(dev);
3130 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3131 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3132 ad->rx_vec_allowed = true;
3133 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3134 rxq = dev->data->rx_queues[i];
3135 if (rxq && ice_rxq_vec_setup(rxq)) {
3136 ad->rx_vec_allowed = false;
3141 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3142 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3143 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3144 #ifdef CC_AVX512_SUPPORT
3145 ad->rx_use_avx512 = true;
3148 "AVX512 is not supported in build env");
3150 if (!ad->rx_use_avx512 &&
3151 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3152 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3153 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3154 ad->rx_use_avx2 = true;
3157 ad->rx_vec_allowed = false;
3161 if (ad->rx_vec_allowed) {
3162 if (dev->data->scattered_rx) {
3163 if (ad->rx_use_avx512) {
3164 #ifdef CC_AVX512_SUPPORT
3165 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3167 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3168 dev->data->port_id);
3170 ice_recv_scattered_pkts_vec_avx512_offload;
3173 "Using AVX512 Vector Scattered Rx (port %d).",
3174 dev->data->port_id);
3176 ice_recv_scattered_pkts_vec_avx512;
3179 } else if (ad->rx_use_avx2) {
3180 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3182 "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3183 dev->data->port_id);
3185 ice_recv_scattered_pkts_vec_avx2_offload;
3188 "Using AVX2 Vector Scattered Rx (port %d).",
3189 dev->data->port_id);
3191 ice_recv_scattered_pkts_vec_avx2;
3195 "Using Vector Scattered Rx (port %d).",
3196 dev->data->port_id);
3197 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3200 if (ad->rx_use_avx512) {
3201 #ifdef CC_AVX512_SUPPORT
3202 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3204 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3205 dev->data->port_id);
3207 ice_recv_pkts_vec_avx512_offload;
3210 "Using AVX512 Vector Rx (port %d).",
3211 dev->data->port_id);
3213 ice_recv_pkts_vec_avx512;
3216 } else if (ad->rx_use_avx2) {
3217 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3219 "Using AVX2 OFFLOAD Vector Rx (port %d).",
3220 dev->data->port_id);
3222 ice_recv_pkts_vec_avx2_offload;
3225 "Using AVX2 Vector Rx (port %d).",
3226 dev->data->port_id);
3228 ice_recv_pkts_vec_avx2;
3232 "Using Vector Rx (port %d).",
3233 dev->data->port_id);
3234 dev->rx_pkt_burst = ice_recv_pkts_vec;
3242 if (dev->data->scattered_rx) {
3243 /* Set the non-LRO scattered function */
3245 "Using a Scattered function on port %d.",
3246 dev->data->port_id);
3247 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3248 } else if (ad->rx_bulk_alloc_allowed) {
3250 "Rx Burst Bulk Alloc Preconditions are "
3251 "satisfied. Rx Burst Bulk Alloc function "
3252 "will be used on port %d.",
3253 dev->data->port_id);
3254 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3257 "Rx Burst Bulk Alloc Preconditions are not "
3258 "satisfied, Normal Rx will be used on port %d.",
3259 dev->data->port_id);
3260 dev->rx_pkt_burst = ice_recv_pkts;
3264 static const struct {
3265 eth_rx_burst_t pkt_burst;
3267 } ice_rx_burst_infos[] = {
3268 { ice_recv_scattered_pkts, "Scalar Scattered" },
3269 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3270 { ice_recv_pkts, "Scalar" },
3272 #ifdef CC_AVX512_SUPPORT
3273 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3274 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3275 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3276 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3278 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3279 { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3280 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3281 { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
3282 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3283 { ice_recv_pkts_vec, "Vector SSE" },
3288 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3289 struct rte_eth_burst_mode *mode)
3291 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3295 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3296 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3297 snprintf(mode->info, sizeof(mode->info), "%s",
3298 ice_rx_burst_infos[i].info);
3308 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3310 struct ice_adapter *ad =
3311 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3313 /* Use a simple Tx queue if possible (only fast free is allowed) */
3314 ad->tx_simple_allowed =
3316 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3317 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3319 if (ad->tx_simple_allowed)
3320 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3324 "Simple Tx can NOT be enabled on Tx queue %u.",
3328 /*********************************************************************
3332 **********************************************************************/
/* The supported TSO MSS range and maximum frame size */
3334 #define ICE_MIN_TSO_MSS 64
3335 #define ICE_MAX_TSO_MSS 9728
3336 #define ICE_MAX_TSO_FRAME_SIZE 262144
3338 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3345 for (i = 0; i < nb_pkts; i++) {
3347 ol_flags = m->ol_flags;
3349 if (ol_flags & PKT_TX_TCP_SEG &&
3350 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3351 m->tso_segsz > ICE_MAX_TSO_MSS ||
3352 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
* An MSS outside this range is considered malicious
3360 #ifdef RTE_ETHDEV_DEBUG_TX
3361 ret = rte_validate_tx_offload(m);
3367 ret = rte_net_intel_cksum_prepare(m);
3377 ice_set_tx_function(struct rte_eth_dev *dev)
3379 struct ice_adapter *ad =
3380 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3382 struct ice_tx_queue *txq;
3384 int tx_check_ret = -1;
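/*
 * Select the Tx burst function once in the primary process: prefer the
 * AVX512, then AVX2, then SSE vector paths when the device and queues
 * allow it, otherwise fall back to the simple or normal transmit
 * functions.
 */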
3386 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3387 ad->tx_use_avx2 = false;
3388 ad->tx_use_avx512 = false;
3389 tx_check_ret = ice_tx_vec_dev_check(dev);
3390 if (tx_check_ret >= 0 &&
3391 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3392 ad->tx_vec_allowed = true;
3394 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3395 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3396 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3397 #ifdef CC_AVX512_SUPPORT
3398 ad->tx_use_avx512 = true;
3401 "AVX512 is not supported in build env");
3403 if (!ad->tx_use_avx512 &&
3404 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3405 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3406 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3407 ad->tx_use_avx2 = true;
3409 if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3410 tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3411 ad->tx_vec_allowed = false;
3413 if (ad->tx_vec_allowed) {
3414 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3415 txq = dev->data->tx_queues[i];
3416 if (txq && ice_txq_vec_setup(txq)) {
3417 ad->tx_vec_allowed = false;
3423 ad->tx_vec_allowed = false;
3427 if (ad->tx_vec_allowed) {
3428 dev->tx_pkt_prepare = NULL;
3429 if (ad->tx_use_avx512) {
3430 #ifdef CC_AVX512_SUPPORT
3431 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3433 "Using AVX512 OFFLOAD Vector Tx (port %d).",
3434 dev->data->port_id);
3436 ice_xmit_pkts_vec_avx512_offload;
3437 dev->tx_pkt_prepare = ice_prep_pkts;
3440 "Using AVX512 Vector Tx (port %d).",
3441 dev->data->port_id);
3442 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3446 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3448 "Using AVX2 OFFLOAD Vector Tx (port %d).",
3449 dev->data->port_id);
3451 ice_xmit_pkts_vec_avx2_offload;
3452 dev->tx_pkt_prepare = ice_prep_pkts;
3454 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3455 ad->tx_use_avx2 ? "avx2 " : "",
3456 dev->data->port_id);
3457 dev->tx_pkt_burst = ad->tx_use_avx2 ?
3458 ice_xmit_pkts_vec_avx2 :
3467 if (ad->tx_simple_allowed) {
PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3469 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3470 dev->tx_pkt_prepare = NULL;
PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3473 dev->tx_pkt_burst = ice_xmit_pkts;
3474 dev->tx_pkt_prepare = ice_prep_pkts;
3478 static const struct {
3479 eth_tx_burst_t pkt_burst;
3481 } ice_tx_burst_infos[] = {
3482 { ice_xmit_pkts_simple, "Scalar Simple" },
3483 { ice_xmit_pkts, "Scalar" },
3485 #ifdef CC_AVX512_SUPPORT
3486 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3487 { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3489 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3490 { ice_xmit_pkts_vec, "Vector SSE" },
3495 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3496 struct rte_eth_burst_mode *mode)
3498 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3502 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3503 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3504 snprintf(mode->info, sizeof(mode->info), "%s",
3505 ice_tx_burst_infos[i].info);
/* The hardware datasheet describes what each value means in more detail.
3516 * @note: fix ice_dev_supported_ptypes_get() if any change here.
3518 static inline uint32_t
3519 ice_get_default_pkt_type(uint16_t ptype)
3521 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3522 __rte_cache_aligned = {
3525 [1] = RTE_PTYPE_L2_ETHER,
3526 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3527 /* [3] - [5] reserved */
3528 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3529 /* [7] - [10] reserved */
3530 [11] = RTE_PTYPE_L2_ETHER_ARP,
3531 /* [12] - [21] reserved */
3533 /* Non tunneled IPv4 */
3534 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3536 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3537 RTE_PTYPE_L4_NONFRAG,
3538 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3541 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3543 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3545 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3549 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3550 RTE_PTYPE_TUNNEL_IP |
3551 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3552 RTE_PTYPE_INNER_L4_FRAG,
3553 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3554 RTE_PTYPE_TUNNEL_IP |
3555 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3556 RTE_PTYPE_INNER_L4_NONFRAG,
3557 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3558 RTE_PTYPE_TUNNEL_IP |
3559 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3560 RTE_PTYPE_INNER_L4_UDP,
3562 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3563 RTE_PTYPE_TUNNEL_IP |
3564 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3565 RTE_PTYPE_INNER_L4_TCP,
3566 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3567 RTE_PTYPE_TUNNEL_IP |
3568 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3569 RTE_PTYPE_INNER_L4_SCTP,
3570 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3571 RTE_PTYPE_TUNNEL_IP |
3572 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3573 RTE_PTYPE_INNER_L4_ICMP,
3576 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3577 RTE_PTYPE_TUNNEL_IP |
3578 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3579 RTE_PTYPE_INNER_L4_FRAG,
3580 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3581 RTE_PTYPE_TUNNEL_IP |
3582 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3583 RTE_PTYPE_INNER_L4_NONFRAG,
3584 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3585 RTE_PTYPE_TUNNEL_IP |
3586 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3587 RTE_PTYPE_INNER_L4_UDP,
3589 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3590 RTE_PTYPE_TUNNEL_IP |
3591 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3592 RTE_PTYPE_INNER_L4_TCP,
3593 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3594 RTE_PTYPE_TUNNEL_IP |
3595 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3596 RTE_PTYPE_INNER_L4_SCTP,
3597 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3598 RTE_PTYPE_TUNNEL_IP |
3599 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3600 RTE_PTYPE_INNER_L4_ICMP,
3602 /* IPv4 --> GRE/Teredo/VXLAN */
3603 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3604 RTE_PTYPE_TUNNEL_GRENAT,
3606 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3607 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3608 RTE_PTYPE_TUNNEL_GRENAT |
3609 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3610 RTE_PTYPE_INNER_L4_FRAG,
3611 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3612 RTE_PTYPE_TUNNEL_GRENAT |
3613 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3614 RTE_PTYPE_INNER_L4_NONFRAG,
3615 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3616 RTE_PTYPE_TUNNEL_GRENAT |
3617 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3618 RTE_PTYPE_INNER_L4_UDP,
3620 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3621 RTE_PTYPE_TUNNEL_GRENAT |
3622 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3623 RTE_PTYPE_INNER_L4_TCP,
3624 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3625 RTE_PTYPE_TUNNEL_GRENAT |
3626 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3627 RTE_PTYPE_INNER_L4_SCTP,
3628 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3629 RTE_PTYPE_TUNNEL_GRENAT |
3630 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3631 RTE_PTYPE_INNER_L4_ICMP,
3633 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3634 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3635 RTE_PTYPE_TUNNEL_GRENAT |
3636 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3637 RTE_PTYPE_INNER_L4_FRAG,
3638 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3639 RTE_PTYPE_TUNNEL_GRENAT |
3640 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3641 RTE_PTYPE_INNER_L4_NONFRAG,
3642 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3643 RTE_PTYPE_TUNNEL_GRENAT |
3644 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3645 RTE_PTYPE_INNER_L4_UDP,
3647 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3648 RTE_PTYPE_TUNNEL_GRENAT |
3649 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3650 RTE_PTYPE_INNER_L4_TCP,
3651 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3652 RTE_PTYPE_TUNNEL_GRENAT |
3653 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3654 RTE_PTYPE_INNER_L4_SCTP,
3655 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3656 RTE_PTYPE_TUNNEL_GRENAT |
3657 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3658 RTE_PTYPE_INNER_L4_ICMP,
3660 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3661 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3662 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3664 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3665 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3666 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3667 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3668 RTE_PTYPE_INNER_L4_FRAG,
3669 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3670 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3671 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3672 RTE_PTYPE_INNER_L4_NONFRAG,
3673 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3674 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3675 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3676 RTE_PTYPE_INNER_L4_UDP,
3678 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3679 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3680 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3681 RTE_PTYPE_INNER_L4_TCP,
3682 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3683 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3684 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3685 RTE_PTYPE_INNER_L4_SCTP,
3686 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3687 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3688 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3689 RTE_PTYPE_INNER_L4_ICMP,
3691 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3692 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3693 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3694 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3695 RTE_PTYPE_INNER_L4_FRAG,
3696 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3697 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3698 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3699 RTE_PTYPE_INNER_L4_NONFRAG,
3700 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3701 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3702 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3703 RTE_PTYPE_INNER_L4_UDP,
3705 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3706 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3707 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3708 RTE_PTYPE_INNER_L4_TCP,
3709 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3710 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3711 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3712 RTE_PTYPE_INNER_L4_SCTP,
3713 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3714 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3715 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3716 RTE_PTYPE_INNER_L4_ICMP,
3717 /* [73] - [87] reserved */
3719 /* Non tunneled IPv6 */
3720 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3722 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3723 RTE_PTYPE_L4_NONFRAG,
3724 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3727 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3729 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3731 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3735 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3736 RTE_PTYPE_TUNNEL_IP |
3737 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3738 RTE_PTYPE_INNER_L4_FRAG,
3739 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3740 RTE_PTYPE_TUNNEL_IP |
3741 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3742 RTE_PTYPE_INNER_L4_NONFRAG,
3743 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3744 RTE_PTYPE_TUNNEL_IP |
3745 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3746 RTE_PTYPE_INNER_L4_UDP,
3748 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3749 RTE_PTYPE_TUNNEL_IP |
3750 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3751 RTE_PTYPE_INNER_L4_TCP,
3752 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3753 RTE_PTYPE_TUNNEL_IP |
3754 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3755 RTE_PTYPE_INNER_L4_SCTP,
3756 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3757 RTE_PTYPE_TUNNEL_IP |
3758 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3759 RTE_PTYPE_INNER_L4_ICMP,
3762 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3763 RTE_PTYPE_TUNNEL_IP |
3764 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3765 RTE_PTYPE_INNER_L4_FRAG,
3766 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3767 RTE_PTYPE_TUNNEL_IP |
3768 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3769 RTE_PTYPE_INNER_L4_NONFRAG,
3770 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3771 RTE_PTYPE_TUNNEL_IP |
3772 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3773 RTE_PTYPE_INNER_L4_UDP,
3774 /* [105] reserved */
3775 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3776 RTE_PTYPE_TUNNEL_IP |
3777 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3778 RTE_PTYPE_INNER_L4_TCP,
3779 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3780 RTE_PTYPE_TUNNEL_IP |
3781 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3782 RTE_PTYPE_INNER_L4_SCTP,
3783 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3784 RTE_PTYPE_TUNNEL_IP |
3785 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3786 RTE_PTYPE_INNER_L4_ICMP,
3788 /* IPv6 --> GRE/Teredo/VXLAN */
3789 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3790 RTE_PTYPE_TUNNEL_GRENAT,
3792 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3793 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3794 RTE_PTYPE_TUNNEL_GRENAT |
3795 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3796 RTE_PTYPE_INNER_L4_FRAG,
3797 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3798 RTE_PTYPE_TUNNEL_GRENAT |
3799 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3800 RTE_PTYPE_INNER_L4_NONFRAG,
3801 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3802 RTE_PTYPE_TUNNEL_GRENAT |
3803 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3804 RTE_PTYPE_INNER_L4_UDP,
3805 /* [113] reserved */
3806 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3807 RTE_PTYPE_TUNNEL_GRENAT |
3808 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3809 RTE_PTYPE_INNER_L4_TCP,
3810 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3811 RTE_PTYPE_TUNNEL_GRENAT |
3812 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3813 RTE_PTYPE_INNER_L4_SCTP,
3814 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3815 RTE_PTYPE_TUNNEL_GRENAT |
3816 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3817 RTE_PTYPE_INNER_L4_ICMP,
3819 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3820 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3821 RTE_PTYPE_TUNNEL_GRENAT |
3822 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3823 RTE_PTYPE_INNER_L4_FRAG,
3824 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3825 RTE_PTYPE_TUNNEL_GRENAT |
3826 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3827 RTE_PTYPE_INNER_L4_NONFRAG,
3828 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3829 RTE_PTYPE_TUNNEL_GRENAT |
3830 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3831 RTE_PTYPE_INNER_L4_UDP,
3832 /* [120] reserved */
3833 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3834 RTE_PTYPE_TUNNEL_GRENAT |
3835 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3836 RTE_PTYPE_INNER_L4_TCP,
3837 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3838 RTE_PTYPE_TUNNEL_GRENAT |
3839 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3840 RTE_PTYPE_INNER_L4_SCTP,
3841 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3842 RTE_PTYPE_TUNNEL_GRENAT |
3843 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3844 RTE_PTYPE_INNER_L4_ICMP,
3846 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3847 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3848 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3850 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3851 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3852 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3853 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3854 RTE_PTYPE_INNER_L4_FRAG,
3855 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3856 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3857 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3858 RTE_PTYPE_INNER_L4_NONFRAG,
3859 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3860 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3861 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3862 RTE_PTYPE_INNER_L4_UDP,
3863 /* [128] reserved */
3864 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3865 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3866 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3867 RTE_PTYPE_INNER_L4_TCP,
3868 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3869 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3870 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3871 RTE_PTYPE_INNER_L4_SCTP,
3872 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3873 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3874 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3875 RTE_PTYPE_INNER_L4_ICMP,
3877 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3878 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3879 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3880 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3881 RTE_PTYPE_INNER_L4_FRAG,
3882 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3883 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3884 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3885 RTE_PTYPE_INNER_L4_NONFRAG,
3886 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3887 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3888 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3889 RTE_PTYPE_INNER_L4_UDP,
3890 /* [135] reserved */
3891 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3892 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3893 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3894 RTE_PTYPE_INNER_L4_TCP,
3895 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3896 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3897 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3898 RTE_PTYPE_INNER_L4_SCTP,
3899 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3900 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3901 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3902 RTE_PTYPE_INNER_L4_ICMP,
3903 /* [139] - [299] reserved */
3906 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3907 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3909 /* PPPoE --> IPv4 */
3910 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3911 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3913 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3914 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3915 RTE_PTYPE_L4_NONFRAG,
3916 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3917 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3919 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3920 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3922 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3923 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3925 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3926 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3929 /* PPPoE --> IPv6 */
3930 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3931 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3933 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3934 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3935 RTE_PTYPE_L4_NONFRAG,
3936 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3937 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3939 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3940 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3942 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3943 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3945 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3946 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3948 /* [314] - [324] reserved */
3950 /* IPv4/IPv6 --> GTPC/GTPU */
3951 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3952 RTE_PTYPE_TUNNEL_GTPC,
3953 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3954 RTE_PTYPE_TUNNEL_GTPC,
3955 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3956 RTE_PTYPE_TUNNEL_GTPC,
3957 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3958 RTE_PTYPE_TUNNEL_GTPC,
3959 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3960 RTE_PTYPE_TUNNEL_GTPU,
3961 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3962 RTE_PTYPE_TUNNEL_GTPU,
3964 /* IPv4 --> GTPU --> IPv4 */
3965 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3966 RTE_PTYPE_TUNNEL_GTPU |
3967 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3968 RTE_PTYPE_INNER_L4_FRAG,
3969 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3970 RTE_PTYPE_TUNNEL_GTPU |
3971 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3972 RTE_PTYPE_INNER_L4_NONFRAG,
3973 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3974 RTE_PTYPE_TUNNEL_GTPU |
3975 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3976 RTE_PTYPE_INNER_L4_UDP,
3977 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3978 RTE_PTYPE_TUNNEL_GTPU |
3979 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3980 RTE_PTYPE_INNER_L4_TCP,
3981 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3982 RTE_PTYPE_TUNNEL_GTPU |
3983 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3984 RTE_PTYPE_INNER_L4_ICMP,
3986 /* IPv6 --> GTPU --> IPv4 */
3987 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3988 RTE_PTYPE_TUNNEL_GTPU |
3989 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3990 RTE_PTYPE_INNER_L4_FRAG,
3991 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3992 RTE_PTYPE_TUNNEL_GTPU |
3993 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3994 RTE_PTYPE_INNER_L4_NONFRAG,
3995 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3996 RTE_PTYPE_TUNNEL_GTPU |
3997 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3998 RTE_PTYPE_INNER_L4_UDP,
3999 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4000 RTE_PTYPE_TUNNEL_GTPU |
4001 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4002 RTE_PTYPE_INNER_L4_TCP,
4003 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4004 RTE_PTYPE_TUNNEL_GTPU |
4005 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4006 RTE_PTYPE_INNER_L4_ICMP,
4008 /* IPv4 --> GTPU --> IPv6 */
4009 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4010 RTE_PTYPE_TUNNEL_GTPU |
4011 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4012 RTE_PTYPE_INNER_L4_FRAG,
4013 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4014 RTE_PTYPE_TUNNEL_GTPU |
4015 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4016 RTE_PTYPE_INNER_L4_NONFRAG,
4017 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4018 RTE_PTYPE_TUNNEL_GTPU |
4019 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4020 RTE_PTYPE_INNER_L4_UDP,
4021 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4022 RTE_PTYPE_TUNNEL_GTPU |
4023 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4024 RTE_PTYPE_INNER_L4_TCP,
4025 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4026 RTE_PTYPE_TUNNEL_GTPU |
4027 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4028 RTE_PTYPE_INNER_L4_ICMP,
4030 /* IPv6 --> GTPU --> IPv6 */
4031 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4032 RTE_PTYPE_TUNNEL_GTPU |
4033 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4034 RTE_PTYPE_INNER_L4_FRAG,
4035 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4036 RTE_PTYPE_TUNNEL_GTPU |
4037 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4038 RTE_PTYPE_INNER_L4_NONFRAG,
4039 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4040 RTE_PTYPE_TUNNEL_GTPU |
4041 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4042 RTE_PTYPE_INNER_L4_UDP,
4043 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4044 RTE_PTYPE_TUNNEL_GTPU |
4045 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4046 RTE_PTYPE_INNER_L4_TCP,
4047 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4048 RTE_PTYPE_TUNNEL_GTPU |
4049 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4050 RTE_PTYPE_INNER_L4_ICMP,
4052 /* IPv4 --> UDP ECPRI */
4053 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4055 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4057 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4059 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4061 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4063 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4065 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4067 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4069 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4071 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4074 /* IPV6 --> UDP ECPRI */
4075 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4077 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4079 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4081 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4083 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4085 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4087 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4089 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4091 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4093 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4095 /* All others reserved */
4098 return type_table[ptype];
4102 ice_set_default_ptype_table(struct rte_eth_dev *dev)
4104 struct ice_adapter *ad =
4105 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4108 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4109 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4112 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
4113 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
4114 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4115 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4116 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4118 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
4119 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
4120 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4121 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
4122 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
4123 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
* Check the programming status descriptor in the Rx queue;
* it is used after a Flow Director rule has been programmed on the
4131 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4133 volatile union ice_32byte_rx_desc *rxdp;
4140 rxdp = (volatile union ice_32byte_rx_desc *)
4141 (&rxq->rx_ring[rxq->rx_tail]);
4142 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4143 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4144 >> ICE_RXD_QW1_STATUS_S;
4146 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4148 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4149 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4150 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4151 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4153 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4154 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4155 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4156 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4160 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4161 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4163 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4167 rxdp->wb.qword1.status_error_len = 0;
4169 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4171 if (rxq->rx_tail == 0)
4172 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4174 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4180 #define ICE_FDIR_MAX_WAIT_US 10000
4183 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4185 struct ice_tx_queue *txq = pf->fdir.txq;
4186 struct ice_rx_queue *rxq = pf->fdir.rxq;
4187 volatile struct ice_fltr_desc *fdirdp;
4188 volatile struct ice_tx_desc *txdp;
4192 fdirdp = (volatile struct ice_fltr_desc *)
4193 (&txq->tx_ring[txq->tx_tail]);
4194 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4195 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4197 txdp = &txq->tx_ring[txq->tx_tail + 1];
4198 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4199 td_cmd = ICE_TX_DESC_CMD_EOP |
4200 ICE_TX_DESC_CMD_RS |
4201 ICE_TX_DESC_CMD_DUMMY;
4203 txdp->cmd_type_offset_bsz =
4204 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4207 if (txq->tx_tail >= txq->nb_tx_desc)
4209 /* Update the tx tail register */
4210 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
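/*
 * Poll the data descriptor for the DD bit, for up to
 * ICE_FDIR_MAX_WAIT_US iterations, before checking the Rx programming
 * status.
 */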
4211 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4212 if ((txdp->cmd_type_offset_bsz &
4213 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4214 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4218 if (i >= ICE_FDIR_MAX_WAIT_US) {
4220 "Failed to program FDIR filter: time out to get DD on tx queue.");
4224 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4227 ret = ice_check_fdir_programming_status(rxq);
4235 "Failed to program FDIR filter: programming status reported.");