1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
8 #include "rte_pmd_ice.h"
11 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
15 PKT_TX_OUTER_IP_CKSUM)
17 /* Offset of mbuf dynamic field for protocol extraction data */
18 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
20 /* Mask of mbuf dynamic flags for protocol extraction type */
21 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
22 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
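
/* Look up the mbuf dynamic ol_flag mask registered above for an RXDID;
 * *chk_valid reports whether the XTRMD valid bits must be checked
 * before trusting the extracted metadata.
 */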
28 static inline uint64_t
29 ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
35 [ICE_RXDID_COMMS_AUX_VLAN] = {
36 &rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
37 [ICE_RXDID_COMMS_AUX_IPV4] = {
38 &rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
39 [ICE_RXDID_COMMS_AUX_IPV6] = {
40 &rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
41 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
42 &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
43 [ICE_RXDID_COMMS_AUX_TCP] = {
44 &rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
45 [ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
46 &rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
50 if (rxdid < RTE_DIM(ol_flag_map)) {
51 ol_flag = ol_flag_map[rxdid].ol_flag;
55 *chk_valid = ol_flag_map[rxdid].chk_valid;
63 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
65 static uint8_t rxdid_map[] = {
66 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
67 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
68 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
69 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
70 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
71 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
72 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
75 return xtr_type < RTE_DIM(rxdid_map) ?
76 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
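
/* Program the LAN Rx queue context for @rxq: buffer sizes, maximum
 * packet length, descriptor format (RXDID) and TPH hints, then write
 * the context to hardware.
 */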
79 static enum ice_status
80 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
82 struct ice_vsi *vsi = rxq->vsi;
83 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
84 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
85 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
86 struct ice_rlan_ctx rx_ctx;
88 uint16_t buf_size, len;
89 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
90 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
	/* Set buffer size, as header split is disabled. */
94 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
95 RTE_PKTMBUF_HEADROOM);
97 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
98 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
99 rxq->max_pkt_len = RTE_MIN(len,
100 dev->data->dev_conf.rxmode.max_rx_pkt_len);
102 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
103 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
104 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
105 PMD_DRV_LOG(ERR, "maximum packet length must "
106 "be larger than %u and smaller than %u,"
107 "as jumbo frame is enabled",
108 (uint32_t)RTE_ETHER_MAX_LEN,
109 (uint32_t)ICE_FRAME_SIZE_MAX);
113 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
114 rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
115 PMD_DRV_LOG(ERR, "maximum packet length must be "
116 "larger than %u and smaller than %u, "
117 "as jumbo frame is disabled",
118 (uint32_t)RTE_ETHER_MIN_LEN,
119 (uint32_t)RTE_ETHER_MAX_LEN);
124 memset(&rx_ctx, 0, sizeof(rx_ctx));
126 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
127 rx_ctx.qlen = rxq->nb_rx_desc;
128 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
129 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
130 rx_ctx.dtype = 0; /* No Header Split mode */
131 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
132 rx_ctx.dsize = 1; /* 32B descriptors */
134 rx_ctx.rxmax = rxq->max_pkt_len;
135 /* TPH: Transaction Layer Packet (TLP) processing hints */
136 rx_ctx.tphrdesc_ena = 1;
137 rx_ctx.tphwdesc_ena = 1;
138 rx_ctx.tphdata_ena = 1;
139 rx_ctx.tphhead_ena = 1;
	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
141 * When the number of free descriptors goes below the lrxqthresh,
142 * an immediate interrupt is triggered.
144 rx_ctx.lrxqthresh = 2;
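	/* e.g., lrxqthresh = 2 means the interrupt fires once fewer than
	 * 2 * 64 = 128 descriptors remain free (illustrative reading of
	 * the units above).
	 */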
	/* default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
148 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
150 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
152 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
153 rxq->port_id, rxq->queue_id, rxdid);
155 if (!(pf->supported_rxdid & BIT(rxdid))) {
156 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
161 /* Enable Flexible Descriptors in the queue context which
162 * allows this driver to select a specific receive descriptor format
164 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
165 QRXFLXP_CNTXT_RXDID_IDX_M;
167 /* increasing context priority to pick up profile ID;
	 * default is 0x01; setting to 0x03 to ensure the profile
	 * is programmed even if the previous context has the same priority
171 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
172 QRXFLXP_CNTXT_RXDID_PRIO_M;
174 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
176 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
178 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
182 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
184 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
189 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
190 RTE_PKTMBUF_HEADROOM);
192 /* Check if scattered RX needs to be used. */
193 if (rxq->max_pkt_len > buf_size)
194 dev->data->scattered_rx = 1;
196 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
	/* Init the Rx tail register */
199 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
204 /* Allocate mbufs for all descriptors in rx queue */
206 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
208 struct ice_rx_entry *rxe = rxq->sw_ring;
212 for (i = 0; i < rxq->nb_rx_desc; i++) {
213 volatile union ice_rx_flex_desc *rxd;
214 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
216 if (unlikely(!mbuf)) {
217 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
221 rte_mbuf_refcnt_set(mbuf, 1);
223 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
225 mbuf->port = rxq->port_id;
228 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
230 rxd = &rxq->rx_ring[i];
231 rxd->read.pkt_addr = dma_addr;
232 rxd->read.hdr_addr = 0;
233 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
243 /* Free all mbufs for descriptors in rx queue */
245 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
249 if (!rxq || !rxq->sw_ring) {
250 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
254 for (i = 0; i < rxq->nb_rx_desc; i++) {
255 if (rxq->sw_ring[i].mbuf) {
256 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
257 rxq->sw_ring[i].mbuf = NULL;
260 if (rxq->rx_nb_avail == 0)
262 for (i = 0; i < rxq->rx_nb_avail; i++)
263 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
265 rxq->rx_nb_avail = 0;
/* Turn an Rx queue on or off
 * @q_idx: queue index in PF scope
 * @on: true to enable the queue, false to disable it
273 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
278 /* QRX_CTRL = QRX_ENA */
279 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
282 if (reg & QRX_CTRL_QENA_STAT_M)
283 return 0; /* Already on, skip */
284 reg |= QRX_CTRL_QENA_REQ_M;
286 if (!(reg & QRX_CTRL_QENA_STAT_M))
287 return 0; /* Already off, skip */
288 reg &= ~QRX_CTRL_QENA_REQ_M;
291 /* Write the register */
292 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
293 /* Check the result. It is said that QENA_STAT
	 * follows the QENA_REQ by no more than 10 us.
295 * TODO: need to change the wait counter later
297 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
298 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
299 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
301 if ((reg & QRX_CTRL_QENA_REQ_M) &&
302 (reg & QRX_CTRL_QENA_STAT_M))
305 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
306 !(reg & QRX_CTRL_QENA_STAT_M))
	/* Check whether the wait timed out */
312 if (j >= ICE_CHK_Q_ENA_COUNT) {
313 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
314 (on ? "enable" : "disable"), q_idx);
322 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
326 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
327 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
328 "rxq->rx_free_thresh=%d, "
329 "ICE_RX_MAX_BURST=%d",
330 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
332 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
333 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
334 "rxq->rx_free_thresh=%d, "
335 "rxq->nb_rx_desc=%d",
336 rxq->rx_free_thresh, rxq->nb_rx_desc);
338 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
339 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
340 "rxq->nb_rx_desc=%d, "
341 "rxq->rx_free_thresh=%d",
342 rxq->nb_rx_desc, rxq->rx_free_thresh);
349 /* reset fields in ice_rx_queue back to default */
351 ice_reset_rx_queue(struct ice_rx_queue *rxq)
357 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
361 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
363 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
364 ((volatile char *)rxq->rx_ring)[i] = 0;
366 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
367 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
368 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
370 rxq->rx_nb_avail = 0;
371 rxq->rx_next_avail = 0;
372 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
376 rxq->pkt_first_seg = NULL;
377 rxq->pkt_last_seg = NULL;
379 rxq->rxrearm_start = 0;
384 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
386 struct ice_rx_queue *rxq;
388 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
390 PMD_INIT_FUNC_TRACE();
392 if (rx_queue_id >= dev->data->nb_rx_queues) {
393 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
394 rx_queue_id, dev->data->nb_rx_queues);
398 rxq = dev->data->rx_queues[rx_queue_id];
399 if (!rxq || !rxq->q_set) {
400 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
405 err = ice_program_hw_rx_queue(rxq);
407 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
412 err = ice_alloc_rx_queue_mbufs(rxq);
414 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
418 /* Init the RX tail register. */
419 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
421 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
423 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
426 rxq->rx_rel_mbufs(rxq);
427 ice_reset_rx_queue(rxq);
431 dev->data->rx_queue_state[rx_queue_id] =
432 RTE_ETH_QUEUE_STATE_STARTED;
438 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
440 struct ice_rx_queue *rxq;
442 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
444 if (rx_queue_id < dev->data->nb_rx_queues) {
445 rxq = dev->data->rx_queues[rx_queue_id];
447 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
449 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
453 rxq->rx_rel_mbufs(rxq);
454 ice_reset_rx_queue(rxq);
455 dev->data->rx_queue_state[rx_queue_id] =
456 RTE_ETH_QUEUE_STATE_STOPPED;
463 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
465 struct ice_tx_queue *txq;
469 struct ice_aqc_add_tx_qgrp *txq_elem;
470 struct ice_tlan_ctx tx_ctx;
473 PMD_INIT_FUNC_TRACE();
475 if (tx_queue_id >= dev->data->nb_tx_queues) {
476 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
477 tx_queue_id, dev->data->nb_tx_queues);
481 txq = dev->data->tx_queues[tx_queue_id];
482 if (!txq || !txq->q_set) {
483 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
488 buf_len = ice_struct_size(txq_elem, txqs, 1);
489 txq_elem = ice_malloc(hw, buf_len);
494 hw = ICE_VSI_TO_HW(vsi);
496 memset(&tx_ctx, 0, sizeof(tx_ctx));
497 txq_elem->num_txqs = 1;
498 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
500 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
501 tx_ctx.qlen = txq->nb_tx_desc;
502 tx_ctx.pf_num = hw->pf_id;
503 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
504 tx_ctx.src_vsi = vsi->vsi_id;
505 tx_ctx.port_num = hw->port_info->lport;
506 tx_ctx.tso_ena = 1; /* tso enable */
507 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
508 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
510 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
513 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
	/* Init the Tx tail register */
516 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
	/* FIXME: we assume TC is always 0 here */
519 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
520 txq_elem, buf_len, NULL);
522 PMD_DRV_LOG(ERR, "Failed to add lan txq");
526 /* store the schedule node id */
527 txq->q_teid = txq_elem->txqs[0].q_teid;
529 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
535 static enum ice_status
536 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
538 struct ice_vsi *vsi = rxq->vsi;
539 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
540 uint32_t rxdid = ICE_RXDID_LEGACY_1;
541 struct ice_rlan_ctx rx_ctx;
546 rxq->rx_buf_len = 1024;
548 memset(&rx_ctx, 0, sizeof(rx_ctx));
550 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
551 rx_ctx.qlen = rxq->nb_rx_desc;
552 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
553 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
554 rx_ctx.dtype = 0; /* No Header Split mode */
555 rx_ctx.dsize = 1; /* 32B descriptors */
556 rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
557 /* TPH: Transaction Layer Packet (TLP) processing hints */
558 rx_ctx.tphrdesc_ena = 1;
559 rx_ctx.tphwdesc_ena = 1;
560 rx_ctx.tphdata_ena = 1;
561 rx_ctx.tphhead_ena = 1;
	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
563 * When the number of free descriptors goes below the lrxqthresh,
564 * an immediate interrupt is triggered.
566 rx_ctx.lrxqthresh = 2;
	/* default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
570 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
572 /* Enable Flexible Descriptors in the queue context which
573 * allows this driver to select a specific receive descriptor format
575 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
576 QRXFLXP_CNTXT_RXDID_IDX_M;
578 /* increasing context priority to pick up profile ID;
	 * default is 0x01; setting to 0x03 to ensure the profile
	 * is programmed even if the previous context has the same priority
582 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
583 QRXFLXP_CNTXT_RXDID_PRIO_M;
585 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
587 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
589 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
593 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
595 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
600 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
	/* Init the Rx tail register */
603 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
609 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
611 struct ice_rx_queue *rxq;
613 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
614 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
616 PMD_INIT_FUNC_TRACE();
619 if (!rxq || !rxq->q_set) {
620 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
625 err = ice_fdir_program_hw_rx_queue(rxq);
627 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
632 /* Init the RX tail register. */
633 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
635 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
637 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
640 ice_reset_rx_queue(rxq);
648 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
650 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
651 struct ice_tx_queue *txq;
655 struct ice_aqc_add_tx_qgrp *txq_elem;
656 struct ice_tlan_ctx tx_ctx;
659 PMD_INIT_FUNC_TRACE();
662 if (!txq || !txq->q_set) {
663 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
668 buf_len = ice_struct_size(txq_elem, txqs, 1);
669 txq_elem = ice_malloc(hw, buf_len);
674 hw = ICE_VSI_TO_HW(vsi);
676 memset(&tx_ctx, 0, sizeof(tx_ctx));
677 txq_elem->num_txqs = 1;
678 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
680 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
681 tx_ctx.qlen = txq->nb_tx_desc;
682 tx_ctx.pf_num = hw->pf_id;
683 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
684 tx_ctx.src_vsi = vsi->vsi_id;
685 tx_ctx.port_num = hw->port_info->lport;
686 tx_ctx.tso_ena = 1; /* tso enable */
687 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
688 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
690 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
693 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
	/* Init the Tx tail register */
696 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
	/* FIXME: we assume TC is always 0 here */
699 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
700 txq_elem, buf_len, NULL);
702 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
706 /* store the schedule node id */
707 txq->q_teid = txq_elem->txqs[0].q_teid;
713 /* Free all mbufs for descriptors in tx queue */
715 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
719 if (!txq || !txq->sw_ring) {
720 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
724 for (i = 0; i < txq->nb_tx_desc; i++) {
725 if (txq->sw_ring[i].mbuf) {
726 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
727 txq->sw_ring[i].mbuf = NULL;
733 ice_reset_tx_queue(struct ice_tx_queue *txq)
735 struct ice_tx_entry *txe;
736 uint16_t i, prev, size;
739 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
744 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
745 for (i = 0; i < size; i++)
746 ((volatile char *)txq->tx_ring)[i] = 0;
748 prev = (uint16_t)(txq->nb_tx_desc - 1);
749 for (i = 0; i < txq->nb_tx_desc; i++) {
750 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
752 txd->cmd_type_offset_bsz =
753 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
756 txe[prev].next_id = i;
760 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
761 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
766 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
767 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
771 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
773 struct ice_tx_queue *txq;
774 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
775 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
776 struct ice_vsi *vsi = pf->main_vsi;
777 enum ice_status status;
780 uint16_t q_handle = tx_queue_id;
782 if (tx_queue_id >= dev->data->nb_tx_queues) {
783 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
784 tx_queue_id, dev->data->nb_tx_queues);
788 txq = dev->data->tx_queues[tx_queue_id];
790 PMD_DRV_LOG(ERR, "TX queue %u is not available",
795 q_ids[0] = txq->reg_idx;
796 q_teids[0] = txq->q_teid;
	/* FIXME: we assume TC is always 0 here */
799 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
800 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
801 if (status != ICE_SUCCESS) {
802 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
806 txq->tx_rel_mbufs(txq);
807 ice_reset_tx_queue(txq);
808 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
814 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
816 struct ice_rx_queue *rxq;
818 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
819 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
823 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
825 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
829 rxq->rx_rel_mbufs(rxq);
835 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
837 struct ice_tx_queue *txq;
838 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
839 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
840 struct ice_vsi *vsi = pf->main_vsi;
841 enum ice_status status;
844 uint16_t q_handle = tx_queue_id;
848 PMD_DRV_LOG(ERR, "TX queue %u is not available",
854 q_ids[0] = txq->reg_idx;
855 q_teids[0] = txq->q_teid;
	/* FIXME: we assume TC is always 0 here */
858 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
859 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
860 if (status != ICE_SUCCESS) {
861 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
865 txq->tx_rel_mbufs(txq);
871 ice_rx_queue_setup(struct rte_eth_dev *dev,
874 unsigned int socket_id,
875 const struct rte_eth_rxconf *rx_conf,
876 struct rte_mempool *mp)
878 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
879 struct ice_adapter *ad =
880 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
881 struct ice_vsi *vsi = pf->main_vsi;
882 struct ice_rx_queue *rxq;
883 const struct rte_memzone *rz;
886 int use_def_burst_func = 1;
888 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
889 nb_desc > ICE_MAX_RING_DESC ||
890 nb_desc < ICE_MIN_RING_DESC) {
891 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
896 /* Free memory if needed */
897 if (dev->data->rx_queues[queue_idx]) {
898 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
899 dev->data->rx_queues[queue_idx] = NULL;
902 /* Allocate the rx queue data structure */
903 rxq = rte_zmalloc_socket(NULL,
904 sizeof(struct ice_rx_queue),
908 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
909 "rx queue data structure");
913 rxq->nb_rx_desc = nb_desc;
914 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
915 rxq->queue_id = queue_idx;
917 rxq->reg_idx = vsi->base_queue + queue_idx;
918 rxq->port_id = dev->data->port_id;
919 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
920 rxq->crc_len = RTE_ETHER_CRC_LEN;
924 rxq->drop_en = rx_conf->rx_drop_en;
926 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
927 rxq->proto_xtr = pf->proto_xtr != NULL ?
928 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
	/* Allocate the maximum number of RX ring hardware descriptors. */
931 len = ICE_MAX_RING_DESC;
934 * Allocating a little more memory because vectorized/bulk_alloc Rx
	 * functions don't check boundaries each time.
937 len += ICE_RX_MAX_BURST;
	/* Size the ring for the maximum number of descriptors. */
940 ring_size = sizeof(union ice_rx_flex_desc) * len;
941 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
942 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
943 ring_size, ICE_RING_BASE_ALIGN,
946 ice_rx_queue_release(rxq);
947 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
951 /* Zero all the descriptors in the ring. */
952 memset(rz->addr, 0, ring_size);
954 rxq->rx_ring_dma = rz->iova;
955 rxq->rx_ring = rz->addr;
957 /* always reserve more for bulk alloc */
958 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
960 /* Allocate the software ring. */
961 rxq->sw_ring = rte_zmalloc_socket(NULL,
962 sizeof(struct ice_rx_entry) * len,
966 ice_rx_queue_release(rxq);
967 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
971 ice_reset_rx_queue(rxq);
973 dev->data->rx_queues[queue_idx] = rxq;
974 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
976 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
978 if (!use_def_burst_func) {
979 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
980 "satisfied. Rx Burst Bulk Alloc function will be "
981 "used on port=%d, queue=%d.",
982 rxq->port_id, rxq->queue_id);
984 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
985 "not satisfied, Scattered Rx is requested. "
986 "on port=%d, queue=%d.",
987 rxq->port_id, rxq->queue_id);
988 ad->rx_bulk_alloc_allowed = false;
995 ice_rx_queue_release(void *rxq)
997 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1000 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1005 rte_free(q->sw_ring);
1010 ice_tx_queue_setup(struct rte_eth_dev *dev,
1013 unsigned int socket_id,
1014 const struct rte_eth_txconf *tx_conf)
1016 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1017 struct ice_vsi *vsi = pf->main_vsi;
1018 struct ice_tx_queue *txq;
1019 const struct rte_memzone *tz;
1021 uint16_t tx_rs_thresh, tx_free_thresh;
1024 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1026 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1027 nb_desc > ICE_MAX_RING_DESC ||
1028 nb_desc < ICE_MIN_RING_DESC) {
1029 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1030 "invalid", nb_desc);
1035 * The following two parameters control the setting of the RS bit on
1036 * transmit descriptors. TX descriptors will have their RS bit set
1037 * after txq->tx_rs_thresh descriptors have been used. The TX
1038 * descriptor ring will be cleaned after txq->tx_free_thresh
1039 * descriptors are used or if the number of descriptors required to
1040 * transmit a packet is greater than the number of free TX descriptors.
1042 * The following constraints must be satisfied:
1043 * - tx_rs_thresh must be greater than 0.
1044 * - tx_rs_thresh must be less than the size of the ring minus 2.
1045 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1046 * - tx_rs_thresh must be a divisor of the ring size.
1047 * - tx_free_thresh must be greater than 0.
1048 * - tx_free_thresh must be less than the size of the ring minus 3.
1049 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1051 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1052 * race condition, hence the maximum threshold constraints. When set
1053 * to zero use default values.
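	 *
	 * Illustrative example (values assumed, not mandated by HW): with
	 * nb_desc = 1024, tx_rs_thresh = 32 and tx_free_thresh = 32 satisfy
	 * every constraint above: 32 > 0, 32 < 1022, 32 <= 32, 32 < 1021,
	 * 1024 % 32 == 0 and 32 + 32 <= 1024.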
1055 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1056 tx_conf->tx_free_thresh :
1057 ICE_DEFAULT_TX_FREE_THRESH);
	/* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1060 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1061 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1062 if (tx_conf->tx_rs_thresh)
1063 tx_rs_thresh = tx_conf->tx_rs_thresh;
1064 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1065 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1066 "exceed nb_desc. (tx_rs_thresh=%u "
1067 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1068 (unsigned int)tx_rs_thresh,
1069 (unsigned int)tx_free_thresh,
1070 (unsigned int)nb_desc,
1071 (int)dev->data->port_id,
1075 if (tx_rs_thresh >= (nb_desc - 2)) {
1076 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1077 "number of TX descriptors minus 2. "
1078 "(tx_rs_thresh=%u port=%d queue=%d)",
1079 (unsigned int)tx_rs_thresh,
1080 (int)dev->data->port_id,
1084 if (tx_free_thresh >= (nb_desc - 3)) {
1085 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1086 "tx_free_thresh must be less than the "
1087 "number of TX descriptors minus 3. "
1088 "(tx_free_thresh=%u port=%d queue=%d)",
1089 (unsigned int)tx_free_thresh,
1090 (int)dev->data->port_id,
1094 if (tx_rs_thresh > tx_free_thresh) {
1095 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1096 "equal to tx_free_thresh. (tx_free_thresh=%u"
1097 " tx_rs_thresh=%u port=%d queue=%d)",
1098 (unsigned int)tx_free_thresh,
1099 (unsigned int)tx_rs_thresh,
1100 (int)dev->data->port_id,
1104 if ((nb_desc % tx_rs_thresh) != 0) {
1105 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1106 "number of TX descriptors. (tx_rs_thresh=%u"
1107 " port=%d queue=%d)",
1108 (unsigned int)tx_rs_thresh,
1109 (int)dev->data->port_id,
1113 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1114 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1115 "tx_rs_thresh is greater than 1. "
1116 "(tx_rs_thresh=%u port=%d queue=%d)",
1117 (unsigned int)tx_rs_thresh,
1118 (int)dev->data->port_id,
1123 /* Free memory if needed. */
1124 if (dev->data->tx_queues[queue_idx]) {
1125 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1126 dev->data->tx_queues[queue_idx] = NULL;
1129 /* Allocate the TX queue data structure. */
1130 txq = rte_zmalloc_socket(NULL,
1131 sizeof(struct ice_tx_queue),
1132 RTE_CACHE_LINE_SIZE,
1135 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1136 "tx queue structure");
1140 /* Allocate TX hardware ring descriptors. */
1141 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1142 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1143 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1144 ring_size, ICE_RING_BASE_ALIGN,
1147 ice_tx_queue_release(txq);
1148 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1152 txq->nb_tx_desc = nb_desc;
1153 txq->tx_rs_thresh = tx_rs_thresh;
1154 txq->tx_free_thresh = tx_free_thresh;
1155 txq->pthresh = tx_conf->tx_thresh.pthresh;
1156 txq->hthresh = tx_conf->tx_thresh.hthresh;
1157 txq->wthresh = tx_conf->tx_thresh.wthresh;
1158 txq->queue_id = queue_idx;
1160 txq->reg_idx = vsi->base_queue + queue_idx;
1161 txq->port_id = dev->data->port_id;
1162 txq->offloads = offloads;
1164 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1166 txq->tx_ring_dma = tz->iova;
1167 txq->tx_ring = tz->addr;
1169 /* Allocate software ring */
1171 rte_zmalloc_socket(NULL,
1172 sizeof(struct ice_tx_entry) * nb_desc,
1173 RTE_CACHE_LINE_SIZE,
1175 if (!txq->sw_ring) {
1176 ice_tx_queue_release(txq);
1177 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1181 ice_reset_tx_queue(txq);
1183 dev->data->tx_queues[queue_idx] = txq;
1184 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1185 ice_set_tx_function_flag(dev, txq);
1191 ice_tx_queue_release(void *txq)
1193 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1196 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1201 rte_free(q->sw_ring);
1206 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1207 struct rte_eth_rxq_info *qinfo)
1209 struct ice_rx_queue *rxq;
1211 rxq = dev->data->rx_queues[queue_id];
1213 qinfo->mp = rxq->mp;
1214 qinfo->scattered_rx = dev->data->scattered_rx;
1215 qinfo->nb_desc = rxq->nb_rx_desc;
1217 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1218 qinfo->conf.rx_drop_en = rxq->drop_en;
1219 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1223 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1224 struct rte_eth_txq_info *qinfo)
1226 struct ice_tx_queue *txq;
1228 txq = dev->data->tx_queues[queue_id];
1230 qinfo->nb_desc = txq->nb_tx_desc;
1232 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1233 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1234 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1236 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1237 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1238 qinfo->conf.offloads = txq->offloads;
1239 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1243 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1245 #define ICE_RXQ_SCAN_INTERVAL 4
1246 volatile union ice_rx_flex_desc *rxdp;
1247 struct ice_rx_queue *rxq;
1250 rxq = dev->data->rx_queues[rx_queue_id];
1251 rxdp = &rxq->rx_ring[rxq->rx_tail];
1252 while ((desc < rxq->nb_rx_desc) &&
1253 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1254 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
		 * Check the DD bit of one Rx descriptor in each group of 4,
		 * to avoid checking too frequently and degrading performance
1260 desc += ICE_RXQ_SCAN_INTERVAL;
1261 rxdp += ICE_RXQ_SCAN_INTERVAL;
1262 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1263 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1264 desc - rxq->nb_rx_desc]);
1270 #define ICE_RX_FLEX_ERR0_BITS \
1271 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1272 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1273 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1274 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1275 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1276 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1278 /* Rx L3/L4 checksum */
1279 static inline uint64_t
1280 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1284 /* check if HW has decoded the packet and checksum */
1285 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1288 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1289 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1293 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1294 flags |= PKT_RX_IP_CKSUM_BAD;
1296 flags |= PKT_RX_IP_CKSUM_GOOD;
1298 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1299 flags |= PKT_RX_L4_CKSUM_BAD;
1301 flags |= PKT_RX_L4_CKSUM_GOOD;
1303 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1304 flags |= PKT_RX_EIP_CKSUM_BAD;
1310 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1312 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1313 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1314 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1316 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1317 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1318 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1323 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1324 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1325 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1326 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1327 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1328 mb->vlan_tci_outer = mb->vlan_tci;
1329 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1330 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1331 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1332 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1334 mb->vlan_tci_outer = 0;
1337 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1338 mb->vlan_tci, mb->vlan_tci_outer);
1341 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1342 #define ICE_RX_PROTO_XTR_VALID \
1343 ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
1344 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
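
/* Copy protocol extraction metadata (aux0/aux1) from the Rx flex
 * descriptor into the mbuf dynamic field and set the matching
 * dynamic ol_flag.
 */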
1347 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
1348 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
1350 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
1351 uint32_t metadata = 0;
1355 ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
1356 if (unlikely(!ol_flag))
1360 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
1361 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
1363 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
1365 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
1367 if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
1368 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
1369 else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
1370 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
1376 mb->ol_flags |= ol_flag;
1378 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
1383 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
1384 volatile union ice_rx_flex_desc *rxdp)
1386 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
1387 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
1388 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1391 stat_err = rte_le_to_cpu_16(desc->status_error0);
1392 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
1393 mb->ol_flags |= PKT_RX_RSS_HASH;
1394 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
1398 if (desc->flow_id != 0xFFFFFFFF) {
1399 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1400 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
1403 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1404 if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
1405 ice_rxd_to_proto_xtr(mb, desc);
1409 #define ICE_LOOK_AHEAD 8
1410 #if (ICE_LOOK_AHEAD != 8)
1411 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
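
/* Scan up to ICE_RX_MAX_BURST descriptors, ICE_LOOK_AHEAD at a time,
 * staging completed mbufs in rxq->rx_stage; returns the number of
 * packets found ready.
 */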
1414 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1416 volatile union ice_rx_flex_desc *rxdp;
1417 struct ice_rx_entry *rxep;
1418 struct rte_mbuf *mb;
1421 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1422 int32_t i, j, nb_rx = 0;
1423 uint64_t pkt_flags = 0;
1424 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1426 rxdp = &rxq->rx_ring[rxq->rx_tail];
1427 rxep = &rxq->sw_ring[rxq->rx_tail];
1429 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1431 /* Make sure there is at least 1 packet to receive */
1432 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1436 * Scan LOOK_AHEAD descriptors at a time to determine which
1437 * descriptors reference packets that are ready to be received.
1439 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1440 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1441 /* Read desc statuses backwards to avoid race condition */
1442 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1443 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1447 /* Compute how many status bits were set */
1448 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1449 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1453 /* Translate descriptor info to mbuf parameters */
1454 for (j = 0; j < nb_dd; j++) {
1456 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1457 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1458 mb->data_len = pkt_len;
1459 mb->pkt_len = pkt_len;
1461 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1462 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1463 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1464 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1465 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1466 ice_rxd_to_pkt_fields(mb, &rxdp[j]);
1468 mb->ol_flags |= pkt_flags;
1471 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1472 rxq->rx_stage[i + j] = rxep[j].mbuf;
1474 if (nb_dd != ICE_LOOK_AHEAD)
1478 /* Clear software ring entries */
1479 for (i = 0; i < nb_rx; i++)
1480 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1482 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1483 "port_id=%u, queue_id=%u, nb_rx=%d",
1484 rxq->port_id, rxq->queue_id, nb_rx);
1489 static inline uint16_t
1490 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1491 struct rte_mbuf **rx_pkts,
1495 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1497 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1499 for (i = 0; i < nb_pkts; i++)
1500 rx_pkts[i] = stage[i];
1502 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1503 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1509 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1511 volatile union ice_rx_flex_desc *rxdp;
1512 struct ice_rx_entry *rxep;
1513 struct rte_mbuf *mb;
1514 uint16_t alloc_idx, i;
1518 /* Allocate buffers in bulk */
1519 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1520 (rxq->rx_free_thresh - 1));
1521 rxep = &rxq->sw_ring[alloc_idx];
1522 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1523 rxq->rx_free_thresh);
1524 if (unlikely(diag != 0)) {
1525 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1529 rxdp = &rxq->rx_ring[alloc_idx];
1530 for (i = 0; i < rxq->rx_free_thresh; i++) {
1531 if (likely(i < (rxq->rx_free_thresh - 1)))
1532 /* Prefetch next mbuf */
1533 rte_prefetch0(rxep[i + 1].mbuf);
1536 rte_mbuf_refcnt_set(mb, 1);
1538 mb->data_off = RTE_PKTMBUF_HEADROOM;
1540 mb->port = rxq->port_id;
1541 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1542 rxdp[i].read.hdr_addr = 0;
1543 rxdp[i].read.pkt_addr = dma_addr;
	/* Update Rx tail register */
1547 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1549 rxq->rx_free_trigger =
1550 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1551 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1552 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1557 static inline uint16_t
1558 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1560 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1562 struct rte_eth_dev *dev;
1567 if (rxq->rx_nb_avail)
1568 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1570 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1571 rxq->rx_next_avail = 0;
1572 rxq->rx_nb_avail = nb_rx;
1573 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1575 if (rxq->rx_tail > rxq->rx_free_trigger) {
1576 if (ice_rx_alloc_bufs(rxq) != 0) {
1579 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1580 dev->data->rx_mbuf_alloc_failed +=
1581 rxq->rx_free_thresh;
1582 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1583 "port_id=%u, queue_id=%u",
1584 rxq->port_id, rxq->queue_id);
1585 rxq->rx_nb_avail = 0;
1586 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1587 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1588 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1594 if (rxq->rx_tail >= rxq->nb_rx_desc)
1597 if (rxq->rx_nb_avail)
1598 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1604 ice_recv_pkts_bulk_alloc(void *rx_queue,
1605 struct rte_mbuf **rx_pkts,
1612 if (unlikely(nb_pkts == 0))
1615 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1616 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1619 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1620 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1621 nb_rx = (uint16_t)(nb_rx + count);
1622 nb_pkts = (uint16_t)(nb_pkts - count);
1631 ice_recv_scattered_pkts(void *rx_queue,
1632 struct rte_mbuf **rx_pkts,
1635 struct ice_rx_queue *rxq = rx_queue;
1636 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1637 volatile union ice_rx_flex_desc *rxdp;
1638 union ice_rx_flex_desc rxd;
1639 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1640 struct ice_rx_entry *rxe;
1641 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1642 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
	struct rte_mbuf *nmb; /* newly allocated mbuf */
1644 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1645 uint16_t rx_id = rxq->rx_tail;
1647 uint16_t nb_hold = 0;
1648 uint16_t rx_packet_len;
1649 uint16_t rx_stat_err0;
1652 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1653 struct rte_eth_dev *dev;
1655 while (nb_rx < nb_pkts) {
1656 rxdp = &rx_ring[rx_id];
1657 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1659 /* Check the DD bit first */
1660 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1664 nmb = rte_mbuf_raw_alloc(rxq->mp);
1665 if (unlikely(!nmb)) {
1666 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1667 dev->data->rx_mbuf_alloc_failed++;
		rxd = *rxdp; /* copy descriptor in ring to temp variable */
1673 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1675 if (unlikely(rx_id == rxq->nb_rx_desc))
1678 /* Prefetch next mbuf */
1679 rte_prefetch0(sw_ring[rx_id].mbuf);
1682 * When next RX descriptor is on a cache line boundary,
1683 * prefetch the next 4 RX descriptors and next 8 pointers
1686 if ((rx_id & 0x3) == 0) {
1687 rte_prefetch0(&rx_ring[rx_id]);
1688 rte_prefetch0(&sw_ring[rx_id]);
1694 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1696 /* Set data buffer address and data length of the mbuf */
1697 rxdp->read.hdr_addr = 0;
1698 rxdp->read.pkt_addr = dma_addr;
1699 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1700 ICE_RX_FLX_DESC_PKT_LEN_M;
1701 rxm->data_len = rx_packet_len;
1702 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1705 * If this is the first buffer of the received packet, set the
1706 * pointer to the first mbuf of the packet and initialize its
1707 * context. Otherwise, update the total length and the number
1708 * of segments of the current scattered packet, and update the
1709 * pointer to the last mbuf of the current packet.
1713 first_seg->nb_segs = 1;
1714 first_seg->pkt_len = rx_packet_len;
1716 first_seg->pkt_len =
1717 (uint16_t)(first_seg->pkt_len +
1719 first_seg->nb_segs++;
1720 last_seg->next = rxm;
1724 * If this is not the last buffer of the received packet,
1725 * update the pointer to the last mbuf of the current scattered
1726 * packet and continue to parse the RX ring.
1728 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1734 * This is the last buffer of the received packet. If the CRC
1735 * is not stripped by the hardware:
1736 * - Subtract the CRC length from the total packet length.
1737 * - If the last buffer only contains the whole CRC or a part
1738 * of it, free the mbuf associated to the last buffer. If part
1739 * of the CRC is also contained in the previous mbuf, subtract
1740 * the length of that CRC part from the data length of the
1744 if (unlikely(rxq->crc_len > 0)) {
1745 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1746 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1747 rte_pktmbuf_free_seg(rxm);
1748 first_seg->nb_segs--;
1749 last_seg->data_len =
1750 (uint16_t)(last_seg->data_len -
1751 (RTE_ETHER_CRC_LEN - rx_packet_len));
1752 last_seg->next = NULL;
1754 rxm->data_len = (uint16_t)(rx_packet_len -
1758 first_seg->port = rxq->port_id;
1759 first_seg->ol_flags = 0;
1760 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1761 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1762 ice_rxd_to_vlan_tci(first_seg, &rxd);
1763 ice_rxd_to_pkt_fields(first_seg, &rxd);
1764 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1765 first_seg->ol_flags |= pkt_flags;
1766 /* Prefetch data of first segment, if configured to do so. */
1767 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1768 first_seg->data_off));
1769 rx_pkts[nb_rx++] = first_seg;
1773 /* Record index of the next RX descriptor to probe. */
1774 rxq->rx_tail = rx_id;
1775 rxq->pkt_first_seg = first_seg;
1776 rxq->pkt_last_seg = last_seg;
1779 * If the number of free RX descriptors is greater than the RX free
1780 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1781 * register. Update the RDT with the value of the last processed RX
1782 * descriptor minus 1, to guarantee that the RDT register is never
	 * equal to the RDH register, which creates a "full" ring situation
1784 * from the hardware point of view.
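	 *
	 * For instance, with rx_free_thresh = 32 (an illustrative value)
	 * the tail register is written about once per 32 processed
	 * descriptors instead of once per packet.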
1786 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1787 if (nb_hold > rxq->rx_free_thresh) {
1788 rx_id = (uint16_t)(rx_id == 0 ?
1789 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1790 /* write TAIL register */
1791 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1794 rxq->nb_rx_hold = nb_hold;
	/* return received packets in the burst */
1801 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1803 struct ice_adapter *ad =
1804 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1805 const uint32_t *ptypes;
1807 static const uint32_t ptypes_os[] = {
1808 /* refers to ice_get_default_pkt_type() */
1810 RTE_PTYPE_L2_ETHER_TIMESYNC,
1811 RTE_PTYPE_L2_ETHER_LLDP,
1812 RTE_PTYPE_L2_ETHER_ARP,
1813 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1814 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1817 RTE_PTYPE_L4_NONFRAG,
1821 RTE_PTYPE_TUNNEL_GRENAT,
1822 RTE_PTYPE_TUNNEL_IP,
1823 RTE_PTYPE_INNER_L2_ETHER,
1824 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1825 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1826 RTE_PTYPE_INNER_L4_FRAG,
1827 RTE_PTYPE_INNER_L4_ICMP,
1828 RTE_PTYPE_INNER_L4_NONFRAG,
1829 RTE_PTYPE_INNER_L4_SCTP,
1830 RTE_PTYPE_INNER_L4_TCP,
1831 RTE_PTYPE_INNER_L4_UDP,
1835 static const uint32_t ptypes_comms[] = {
1836 /* refers to ice_get_default_pkt_type() */
1838 RTE_PTYPE_L2_ETHER_TIMESYNC,
1839 RTE_PTYPE_L2_ETHER_LLDP,
1840 RTE_PTYPE_L2_ETHER_ARP,
1841 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1842 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1845 RTE_PTYPE_L4_NONFRAG,
1849 RTE_PTYPE_TUNNEL_GRENAT,
1850 RTE_PTYPE_TUNNEL_IP,
1851 RTE_PTYPE_INNER_L2_ETHER,
1852 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1853 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1854 RTE_PTYPE_INNER_L4_FRAG,
1855 RTE_PTYPE_INNER_L4_ICMP,
1856 RTE_PTYPE_INNER_L4_NONFRAG,
1857 RTE_PTYPE_INNER_L4_SCTP,
1858 RTE_PTYPE_INNER_L4_TCP,
1859 RTE_PTYPE_INNER_L4_UDP,
1860 RTE_PTYPE_TUNNEL_GTPC,
1861 RTE_PTYPE_TUNNEL_GTPU,
1862 RTE_PTYPE_L2_ETHER_PPPOE,
1866 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1867 ptypes = ptypes_comms;
1871 if (dev->rx_pkt_burst == ice_recv_pkts ||
1872 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1873 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1877 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1878 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1879 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1880 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1888 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1890 volatile union ice_rx_flex_desc *rxdp;
1891 struct ice_rx_queue *rxq = rx_queue;
1894 if (unlikely(offset >= rxq->nb_rx_desc))
1897 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1898 return RTE_ETH_RX_DESC_UNAVAIL;
1900 desc = rxq->rx_tail + offset;
1901 if (desc >= rxq->nb_rx_desc)
1902 desc -= rxq->nb_rx_desc;
1904 rxdp = &rxq->rx_ring[desc];
1905 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1906 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1907 return RTE_ETH_RX_DESC_DONE;
1909 return RTE_ETH_RX_DESC_AVAIL;
1913 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1915 struct ice_tx_queue *txq = tx_queue;
1916 volatile uint64_t *status;
1917 uint64_t mask, expect;
1920 if (unlikely(offset >= txq->nb_tx_desc))
1923 desc = txq->tx_tail + offset;
1924 /* go to next desc that has the RS bit */
1925 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1927 if (desc >= txq->nb_tx_desc) {
1928 desc -= txq->nb_tx_desc;
1929 if (desc >= txq->nb_tx_desc)
1930 desc -= txq->nb_tx_desc;
1933 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1934 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1935 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1936 ICE_TXD_QW1_DTYPE_S);
1937 if ((*status & mask) == expect)
1938 return RTE_ETH_TX_DESC_DONE;
1940 return RTE_ETH_TX_DESC_FULL;
1944 ice_free_queues(struct rte_eth_dev *dev)
1948 PMD_INIT_FUNC_TRACE();
1950 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1951 if (!dev->data->rx_queues[i])
1953 ice_rx_queue_release(dev->data->rx_queues[i]);
1954 dev->data->rx_queues[i] = NULL;
1955 rte_eth_dma_zone_free(dev, "rx_ring", i);
1957 dev->data->nb_rx_queues = 0;
1959 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1960 if (!dev->data->tx_queues[i])
1962 ice_tx_queue_release(dev->data->tx_queues[i]);
1963 dev->data->tx_queues[i] = NULL;
1964 rte_eth_dma_zone_free(dev, "tx_ring", i);
1966 dev->data->nb_tx_queues = 0;
1969 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
1970 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
1973 ice_fdir_setup_tx_resources(struct ice_pf *pf)
1975 struct ice_tx_queue *txq;
1976 const struct rte_memzone *tz = NULL;
1978 struct rte_eth_dev *dev;
1981 PMD_DRV_LOG(ERR, "PF is not available");
1985 dev = pf->adapter->eth_dev;
1987 /* Allocate the TX queue data structure. */
1988 txq = rte_zmalloc_socket("ice fdir tx queue",
1989 sizeof(struct ice_tx_queue),
1990 RTE_CACHE_LINE_SIZE,
1993 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
1994 "tx queue structure.");
1998 /* Allocate TX hardware ring descriptors. */
1999 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2000 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2002 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2003 ICE_FDIR_QUEUE_ID, ring_size,
2004 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2006 ice_tx_queue_release(txq);
2007 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2011 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2012 txq->queue_id = ICE_FDIR_QUEUE_ID;
2013 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2014 txq->vsi = pf->fdir.fdir_vsi;
2016 txq->tx_ring_dma = tz->iova;
2017 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
	 * don't need to allocate a software ring or reset for the fdir
	 * program queue; just mark the queue as configured.
2025 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2031 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2033 struct ice_rx_queue *rxq;
2034 const struct rte_memzone *rz = NULL;
2036 struct rte_eth_dev *dev;
2039 PMD_DRV_LOG(ERR, "PF is not available");
2043 dev = pf->adapter->eth_dev;
2045 /* Allocate the RX queue data structure. */
2046 rxq = rte_zmalloc_socket("ice fdir rx queue",
2047 sizeof(struct ice_rx_queue),
2048 RTE_CACHE_LINE_SIZE,
2051 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2052 "rx queue structure.");
2056 /* Allocate RX hardware ring descriptors. */
2057 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2058 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2060 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2061 ICE_FDIR_QUEUE_ID, ring_size,
2062 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2064 ice_rx_queue_release(rxq);
2065 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2069 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2070 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2071 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2072 rxq->vsi = pf->fdir.fdir_vsi;
2074 rxq->rx_ring_dma = rz->iova;
2075 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2076 sizeof(union ice_32byte_rx_desc));
2077 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
	 * Don't need to allocate a software ring or reset for the fdir
	 * rx queue; just mark the queue as configured.
2086 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2092 ice_recv_pkts(void *rx_queue,
2093 struct rte_mbuf **rx_pkts,
2096 struct ice_rx_queue *rxq = rx_queue;
2097 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2098 volatile union ice_rx_flex_desc *rxdp;
2099 union ice_rx_flex_desc rxd;
2100 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2101 struct ice_rx_entry *rxe;
	struct rte_mbuf *nmb; /* newly allocated mbuf */
2103 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2104 uint16_t rx_id = rxq->rx_tail;
2106 uint16_t nb_hold = 0;
2107 uint16_t rx_packet_len;
2108 uint16_t rx_stat_err0;
2111 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2112 struct rte_eth_dev *dev;
2114 while (nb_rx < nb_pkts) {
2115 rxdp = &rx_ring[rx_id];
2116 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2118 /* Check the DD bit first */
2119 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2123 nmb = rte_mbuf_raw_alloc(rxq->mp);
2124 if (unlikely(!nmb)) {
2125 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2126 dev->data->rx_mbuf_alloc_failed++;
		rxd = *rxdp; /* copy descriptor in ring to temp variable */
2132 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2134 if (unlikely(rx_id == rxq->nb_rx_desc))
2139 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		 * fill the read format of the descriptor with the physical
		 * address of the newly allocated mbuf: nmb
2145 rxdp->read.hdr_addr = 0;
2146 rxdp->read.pkt_addr = dma_addr;
2148 /* calculate rx_packet_len of the received pkt */
2149 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2150 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2152 /* fill old mbuf with received descriptor: rxd */
2153 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2154 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2157 rxm->pkt_len = rx_packet_len;
2158 rxm->data_len = rx_packet_len;
2159 rxm->port = rxq->port_id;
2160 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2161 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2162 ice_rxd_to_vlan_tci(rxm, &rxd);
2163 ice_rxd_to_pkt_fields(rxm, &rxd);
2164 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2165 rxm->ol_flags |= pkt_flags;
2166 /* copy old mbuf to rx_pkts */
2167 rx_pkts[nb_rx++] = rxm;
2169 rxq->rx_tail = rx_id;
2171 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the queue's receive tail register.
2173 * Update that register with the value of the last processed RX
2174 * descriptor minus 1.
2176 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2177 if (nb_hold > rxq->rx_free_thresh) {
2178 rx_id = (uint16_t)(rx_id == 0 ?
2179 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2180 /* write TAIL register */
2181 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
2184 rxq->nb_rx_hold = nb_hold;
	/* return received packets in the burst */
2191 ice_parse_tunneling_params(uint64_t ol_flags,
2192 union ice_tx_offload tx_offload,
2193 uint32_t *cd_tunneling)
2195 /* EIPT: External (outer) IP header type */
2196 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2197 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2198 else if (ol_flags & PKT_TX_OUTER_IPV4)
2199 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2200 else if (ol_flags & PKT_TX_OUTER_IPV6)
2201 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2203 /* EIPLEN: External (outer) IP header length, in DWords */
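	/* e.g., a 20-byte IPv4 header with no options encodes as
	 * 20 >> 2 = 5 DWords (illustrative).
	 */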
2204 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2205 ICE_TXD_CTX_QW0_EIPLEN_S;
2207 /* L4TUNT: L4 Tunneling Type */
2208 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2209 case PKT_TX_TUNNEL_IPIP:
		/* for non-UDP/GRE tunneling, set to 00b */
2212 case PKT_TX_TUNNEL_VXLAN:
2213 case PKT_TX_TUNNEL_GTP:
2214 case PKT_TX_TUNNEL_GENEVE:
2215 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2217 case PKT_TX_TUNNEL_GRE:
2218 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2221 PMD_TX_LOG(ERR, "Tunnel type not supported");
2225 /* L4TUNLEN: L4 Tunneling Length, in Words
	 * We depend on the app to set rte_mbuf.l2_len correctly.
2228 * For IP in GRE it should be set to the length of the GRE
2230 * For MAC in GRE or MAC in UDP it should be set to the length
2231 * of the GRE or UDP headers plus the inner MAC up to including
2232 * its last Ethertype.
	 * If MPLS labels exist, they should be included as well.
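	 *
	 * Illustrative VXLAN case (assumed header sizes): l2_len =
	 * 8 (UDP) + 8 (VXLAN) + 14 (inner MAC) = 30 bytes, encoded
	 * below as 30 >> 1 = 15 words.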
2235 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2236 ICE_TXD_CTX_QW0_NATLEN_S;
2238 if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2239 (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2240 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2241 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2244 static inline void
2245 ice_txd_enable_checksum(uint64_t ol_flags,
2246 uint32_t *td_cmd,
2247 uint32_t *td_offset,
2248 union ice_tx_offload tx_offload)
2251 if (ol_flags & PKT_TX_TUNNEL_MASK)
2252 *td_offset |= (tx_offload.outer_l2_len >> 1)
2253 << ICE_TX_DESC_LEN_MACLEN_S;
2254 else
2255 *td_offset |= (tx_offload.l2_len >> 1)
2256 << ICE_TX_DESC_LEN_MACLEN_S;
2258 /* Enable L3 checksum offloads */
2259 if (ol_flags & PKT_TX_IP_CKSUM) {
2260 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2261 *td_offset |= (tx_offload.l3_len >> 2) <<
2262 ICE_TX_DESC_LEN_IPLEN_S;
2263 } else if (ol_flags & PKT_TX_IPV4) {
2264 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2265 *td_offset |= (tx_offload.l3_len >> 2) <<
2266 ICE_TX_DESC_LEN_IPLEN_S;
2267 } else if (ol_flags & PKT_TX_IPV6) {
2268 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2269 *td_offset |= (tx_offload.l3_len >> 2) <<
2270 ICE_TX_DESC_LEN_IPLEN_S;
2273 if (ol_flags & PKT_TX_TCP_SEG) {
2274 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2275 *td_offset |= (tx_offload.l4_len >> 2) <<
2276 ICE_TX_DESC_LEN_L4_LEN_S;
2277 return;
2278 }
2280 /* Enable L4 checksum offloads */
2281 switch (ol_flags & PKT_TX_L4_MASK) {
2282 case PKT_TX_TCP_CKSUM:
2283 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2284 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2285 ICE_TX_DESC_LEN_L4_LEN_S;
2286 break;
2287 case PKT_TX_SCTP_CKSUM:
2288 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2289 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2290 ICE_TX_DESC_LEN_L4_LEN_S;
2291 break;
2292 case PKT_TX_UDP_CKSUM:
2293 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2294 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2295 ICE_TX_DESC_LEN_L4_LEN_S;
2296 break;
2297 default:
2298 break;
2299 }
2300 }
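/*
 * Worked example (not part of the driver logic; hypothetical
 * values): for a plain TCP/IPv4 packet with l2_len = 14 and
 * l3_len = 20, requesting PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM packs
 * the offsets in the units the hardware expects (MACLEN in Words,
 * IPLEN and L4LEN in DWords):
 *
 *     maclen = 14 >> 1;                          // 7 Words
 *     iplen  = 20 >> 2;                          // 5 DWords
 *     l4len  = sizeof(struct rte_tcp_hdr) >> 2;  // 5 DWords
 *
 * Each value is then shifted into its field of td_offset.
 */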
2302 static int
2303 ice_xmit_cleanup(struct ice_tx_queue *txq)
2304 {
2305 struct ice_tx_entry *sw_ring = txq->sw_ring;
2306 volatile struct ice_tx_desc *txd = txq->tx_ring;
2307 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2308 uint16_t nb_tx_desc = txq->nb_tx_desc;
2309 uint16_t desc_to_clean_to;
2310 uint16_t nb_tx_to_clean;
2312 /* Determine the last descriptor needing to be cleaned */
2313 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2314 if (desc_to_clean_to >= nb_tx_desc)
2315 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2317 /* Check to make sure the last descriptor to clean is done */
2318 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2319 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2320 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2321 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2322 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2324 txq->port_id, txq->queue_id,
2325 txd[desc_to_clean_to].cmd_type_offset_bsz);
2326 /* Failed to clean any descriptors */
2327 return -1;
2328 }
2330 /* Figure out how many descriptors will be cleaned */
2331 if (last_desc_cleaned > desc_to_clean_to)
2332 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2333 desc_to_clean_to);
2334 else
2335 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2336 last_desc_cleaned);
2338 /* The last descriptor to clean is done, so that means all the
2339 * descriptors from the last descriptor that was cleaned
2340 * up to the last descriptor with the RS bit set
2341 * are done. Only reset the threshold descriptor.
2343 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2345 /* Update the txq to reflect the last descriptor that was cleaned */
2346 txq->last_desc_cleaned = desc_to_clean_to;
2347 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2349 return 0;
2350 }
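/*
 * Illustrative note (not part of the driver logic; hypothetical
 * values): the arithmetic above is modulo nb_tx_desc. With
 * nb_tx_desc = 512, last_desc_cleaned = 500 and tx_rs_thresh = 32,
 * desc_to_clean_to starts at 532 and wraps to 20; since 500 > 20,
 * the wrap branch yields
 * nb_tx_to_clean = (512 - 500) + 20 = 32 descriptors.
 */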
2352 /* Construct the tx flags */
2353 static inline uint64_t
2354 ice_build_ctob(uint32_t td_cmd,
2355 uint32_t td_offset,
2356 uint16_t size,
2357 uint32_t td_tag)
2358 {
2359 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2360 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2361 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2362 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2363 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2366 /* Check if the context descriptor is needed for TX offloading */
2367 static inline uint16_t
2368 ice_calc_context_desc(uint64_t flags)
2370 static uint64_t mask = PKT_TX_TCP_SEG |
2371 PKT_TX_QINQ |
2372 PKT_TX_OUTER_IP_CKSUM |
2373 PKT_TX_TUNNEL_MASK;
2375 return (flags & mask) ? 1 : 0;
2376 }
2378 /* set ice TSO context descriptor */
2379 static inline uint64_t
2380 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2382 uint64_t ctx_desc = 0;
2383 uint32_t cd_cmd, hdr_len, cd_tso_len;
2385 if (!tx_offload.l4_len) {
2386 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2387 return ctx_desc;
2388 }
2390 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2391 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2392 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2394 cd_cmd = ICE_TX_CTX_DESC_TSO;
2395 cd_tso_len = mbuf->pkt_len - hdr_len;
2396 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2397 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2398 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2400 return ctx_desc;
2401 }
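/*
 * Worked example (not part of the driver logic; hypothetical
 * values): for a 9014-byte TSO packet with l2_len = 14, l3_len = 20,
 * l4_len = 20 and tso_segsz = 1460, hdr_len = 54 and
 * cd_tso_len = 9014 - 54 = 8960 bytes of payload, which the hardware
 * emits as ceil(8960 / 1460) = 7 wire frames.
 */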
2403 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2404 #define ICE_MAX_DATA_PER_TXD \
2405 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2406 /* Calculate the number of TX descriptors needed for each pkt */
2407 static inline uint16_t
2408 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2410 struct rte_mbuf *txd = tx_pkt;
2411 uint16_t count = 0;
2413 while (txd != NULL) {
2414 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2415 txd = txd->next;
2416 }
2418 return count;
2419 }
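/*
 * Illustrative note (not part of the driver logic): with
 * ICE_MAX_DATA_PER_TXD = 16383 (16K - 1), a single 32768-byte mbuf
 * segment needs DIV_ROUND_UP(32768, 16383) = 3 data descriptors,
 * while any segment up to 16383 bytes needs exactly one.
 */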
2422 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2424 struct ice_tx_queue *txq;
2425 volatile struct ice_tx_desc *tx_ring;
2426 volatile struct ice_tx_desc *txd;
2427 struct ice_tx_entry *sw_ring;
2428 struct ice_tx_entry *txe, *txn;
2429 struct rte_mbuf *tx_pkt;
2430 struct rte_mbuf *m_seg;
2431 uint32_t cd_tunneling_params;
2432 uint16_t tx_id;
2433 uint16_t nb_tx;
2434 uint16_t nb_used;
2435 uint16_t nb_ctx;
2436 uint32_t td_cmd = 0;
2437 uint32_t td_offset = 0;
2438 uint32_t td_tag = 0;
2439 uint16_t tx_last;
2440 uint16_t slen;
2441 uint64_t buf_dma_addr;
2442 uint64_t ol_flags;
2443 union ice_tx_offload tx_offload = {0};
2445 txq = tx_queue;
2446 sw_ring = txq->sw_ring;
2447 tx_ring = txq->tx_ring;
2448 tx_id = txq->tx_tail;
2449 txe = &sw_ring[tx_id];
2451 /* Check if the descriptor ring needs to be cleaned. */
2452 if (txq->nb_tx_free < txq->tx_free_thresh)
2453 (void)ice_xmit_cleanup(txq);
2455 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2456 tx_pkt = *tx_pkts++;
2458 td_cmd = 0;
2459 td_tag = 0;
2460 td_offset = 0;
2461 ol_flags = tx_pkt->ol_flags;
2462 tx_offload.l2_len = tx_pkt->l2_len;
2463 tx_offload.l3_len = tx_pkt->l3_len;
2464 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2465 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2466 tx_offload.l4_len = tx_pkt->l4_len;
2467 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2468 /* Calculate the number of context descriptors needed. */
2469 nb_ctx = ice_calc_context_desc(ol_flags);
2471 /* The number of descriptors that must be allocated for
2472 * a packet equals the number of segments of that
2473 * packet plus the number of context descriptors, if needed.
2474 * Recalculate the needed tx descs when TSO is enabled, in case
2475 * the mbuf data size exceeds the max data size the hw allows
2476 * per tx desc.
2477 */
2478 if (ol_flags & PKT_TX_TCP_SEG)
2479 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2480 nb_ctx);
2481 else
2482 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2483 tx_last = (uint16_t)(tx_id + nb_used - 1);
2486 if (tx_last >= txq->nb_tx_desc)
2487 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2489 if (nb_used > txq->nb_tx_free) {
2490 if (ice_xmit_cleanup(txq) != 0) {
2491 if (nb_tx == 0)
2492 return 0;
2493 goto end_of_tx;
2494 }
2495 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2496 while (nb_used > txq->nb_tx_free) {
2497 if (ice_xmit_cleanup(txq) != 0) {
2498 if (nb_tx == 0)
2499 return 0;
2500 goto end_of_tx;
2501 }
2502 }
2503 }
2504 }
2506 /* Descriptor based VLAN insertion */
2507 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2508 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2509 td_tag = tx_pkt->vlan_tci;
2512 /* Fill in tunneling parameters if necessary */
2513 cd_tunneling_params = 0;
2514 if (ol_flags & PKT_TX_TUNNEL_MASK)
2515 ice_parse_tunneling_params(ol_flags, tx_offload,
2516 &cd_tunneling_params);
2518 /* Enable checksum offloading */
2519 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2520 ice_txd_enable_checksum(ol_flags, &td_cmd,
2521 &td_offset, tx_offload);
2523 if (nb_ctx) {
2524 /* Setup TX context descriptor if required */
2525 volatile struct ice_tx_ctx_desc *ctx_txd =
2526 (volatile struct ice_tx_ctx_desc *)
2527 &tx_ring[tx_id];
2528 uint16_t cd_l2tag2 = 0;
2529 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2531 txn = &sw_ring[txe->next_id];
2532 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2533 if (txe->mbuf) {
2534 rte_pktmbuf_free_seg(txe->mbuf);
2535 txe->mbuf = NULL;
2536 }
2538 if (ol_flags & PKT_TX_TCP_SEG)
2539 cd_type_cmd_tso_mss |=
2540 ice_set_tso_ctx(tx_pkt, tx_offload);
2542 ctx_txd->tunneling_params =
2543 rte_cpu_to_le_32(cd_tunneling_params);
2545 /* TX context descriptor based double VLAN insert */
2546 if (ol_flags & PKT_TX_QINQ) {
2547 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2548 cd_type_cmd_tso_mss |=
2549 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2550 ICE_TXD_CTX_QW1_CMD_S);
2552 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2553 ctx_txd->qw1 =
2554 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2556 txe->last_id = tx_last;
2557 tx_id = txe->next_id;
2558 txe = txn;
2559 }
2561 m_seg = tx_pkt;
2562 do {
2563 txd = &tx_ring[tx_id];
2564 txn = &sw_ring[txe->next_id];
2566 if (txe->mbuf)
2567 rte_pktmbuf_free_seg(txe->mbuf);
2568 txe->mbuf = m_seg;
2570 /* Setup TX Descriptor */
2571 slen = m_seg->data_len;
2572 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2574 while ((ol_flags & PKT_TX_TCP_SEG) &&
2575 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2576 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2577 txd->cmd_type_offset_bsz =
2578 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2579 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2580 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2581 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2582 ICE_TXD_QW1_TX_BUF_SZ_S) |
2583 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2585 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2586 slen -= ICE_MAX_DATA_PER_TXD;
2588 txe->last_id = tx_last;
2589 tx_id = txe->next_id;
2590 txe = txn;
2591 txd = &tx_ring[tx_id];
2592 txn = &sw_ring[txe->next_id];
2595 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2596 txd->cmd_type_offset_bsz =
2597 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2598 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2599 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2600 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2601 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2603 txe->last_id = tx_last;
2604 tx_id = txe->next_id;
2605 txe = txn;
2606 m_seg = m_seg->next;
2607 } while (m_seg);
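/*
 * Illustrative note (not part of the driver logic): the do/while
 * above walks the mbuf chain, so a 3-segment packet consumes three
 * data descriptors (plus one context descriptor when nb_ctx = 1),
 * matching the nb_used estimate computed before the loop.
 */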
2609 /* fill the last descriptor with End of Packet (EOP) bit */
2610 td_cmd |= ICE_TX_DESC_CMD_EOP;
2611 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2612 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2614 /* set RS bit on the last descriptor of one packet */
2615 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2616 PMD_TX_FREE_LOG(DEBUG,
2617 "Setting RS bit on TXD id="
2618 "%4u (port=%d queue=%d)",
2619 tx_last, txq->port_id, txq->queue_id);
2621 td_cmd |= ICE_TX_DESC_CMD_RS;
2623 /* Update txq RS bit counters */
2624 txq->nb_tx_used = 0;
2626 txd->cmd_type_offset_bsz |=
2627 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2628 ICE_TXD_QW1_CMD_S);
2629 }
2630 end_of_tx:
2631 /* update Tail register */
2632 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2633 txq->tx_tail = tx_id;
2635 return nb_tx;
2636 }
2638 static __rte_always_inline int
2639 ice_tx_free_bufs(struct ice_tx_queue *txq)
2641 struct ice_tx_entry *txep;
2642 uint16_t i;
2644 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2645 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2646 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2647 return 0;
2649 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2651 for (i = 0; i < txq->tx_rs_thresh; i++)
2652 rte_prefetch0((txep + i)->mbuf);
2654 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2655 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2656 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2657 txep->mbuf = NULL;
2658 }
2659 } else {
2660 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2661 rte_pktmbuf_free_seg(txep->mbuf);
2662 txep->mbuf = NULL;
2663 }
2664 }
2666 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2667 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2668 if (txq->tx_next_dd >= txq->nb_tx_desc)
2669 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2671 return txq->tx_rs_thresh;
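/*
 * Illustrative note (not part of the driver logic): the DD bit is
 * polled only on the descriptor at tx_next_dd, the last one of the
 * oldest RS-marked batch, so a single check covers tx_rs_thresh
 * descriptors. With tx_rs_thresh = 32, one successful poll frees 32
 * mbufs at once; DEV_TX_OFFLOAD_MBUF_FAST_FREE additionally allows
 * returning them straight to their mempool without per-segment
 * accounting.
 */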
2674 static int
2675 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2676 uint32_t free_cnt)
2677 {
2678 struct ice_tx_entry *swr_ring = txq->sw_ring;
2679 uint16_t i, tx_last, tx_id;
2680 uint16_t nb_tx_free_last;
2681 uint16_t nb_tx_to_clean;
2682 uint32_t pkt_cnt;
2684 /* Start freeing mbufs from the entry after tx_tail */
2685 tx_last = txq->tx_tail;
2686 tx_id = swr_ring[tx_last].next_id;
2688 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2689 return 0;
2691 nb_tx_to_clean = txq->nb_tx_free;
2692 nb_tx_free_last = txq->nb_tx_free;
2693 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2694 free_cnt = txq->nb_tx_desc;
2696 /* Loop through swr_ring to count the number of
2697 * freeable mbufs and packets.
2698 */
2699 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2700 for (i = 0; i < nb_tx_to_clean &&
2701 pkt_cnt < free_cnt &&
2702 tx_id != tx_last; i++) {
2703 if (swr_ring[tx_id].mbuf != NULL) {
2704 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2705 swr_ring[tx_id].mbuf = NULL;
2707 /*
2708 * If this was the last segment in the packet,
2709 * increment the packet count.
2710 */
2711 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2714 tx_id = swr_ring[tx_id].next_id;
2717 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2718 txq->nb_tx_free || tx_id == tx_last)
2719 break;
2721 if (pkt_cnt < free_cnt) {
2722 if (ice_xmit_cleanup(txq))
2723 break;
2725 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2726 nb_tx_free_last = txq->nb_tx_free;
2730 return (int)pkt_cnt;
2735 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2736 uint32_t free_cnt __rte_unused)
2737 {
2738 return -ENOTSUP;
2739 }
2743 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2744 uint32_t free_cnt)
2745 {
2746 int i, n, cnt;
2748 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2749 free_cnt = txq->nb_tx_desc;
2751 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2753 for (i = 0; i < cnt; i += n) {
2754 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2755 break;
2757 n = ice_tx_free_bufs(txq);
2758 if (n == 0)
2759 break;
2760 }
2762 return i;
2763 }
2767 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2769 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2770 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2771 struct ice_adapter *ad =
2772 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2775 if (ad->tx_vec_allowed)
2776 return ice_tx_done_cleanup_vec(q, free_cnt);
2778 if (ad->tx_simple_allowed)
2779 return ice_tx_done_cleanup_simple(q, free_cnt);
2781 return ice_tx_done_cleanup_full(q, free_cnt);
2784 /* Populate 4 descriptors with data from 4 mbufs */
2786 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2791 for (i = 0; i < 4; i++, txdp++, pkts++) {
2792 dma_addr = rte_mbuf_data_iova(*pkts);
2793 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2794 txdp->cmd_type_offset_bsz =
2795 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2796 (*pkts)->data_len, 0);
2800 /* Populate 1 descriptor with data from 1 mbuf */
2802 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2806 dma_addr = rte_mbuf_data_iova(*pkts);
2807 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2808 txdp->cmd_type_offset_bsz =
2809 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2810 (*pkts)->data_len, 0);
2814 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2817 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2818 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2819 const int N_PER_LOOP = 4;
2820 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2821 int mainpart, leftover;
2822 int i, j;
2824 /*
2825 * Process most of the packets in chunks of N pkts. Any
2826 * leftover packets will get processed one at a time.
2827 */
2828 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2829 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2830 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2831 /* Copy N mbuf pointers to the S/W ring */
2832 for (j = 0; j < N_PER_LOOP; ++j)
2833 (txep + i + j)->mbuf = *(pkts + i + j);
2834 tx4(txdp + i, pkts + i);
2837 if (unlikely(leftover > 0)) {
2838 for (i = 0; i < leftover; ++i) {
2839 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2840 tx1(txdp + mainpart + i, pkts + mainpart + i);
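/*
 * Illustrative note (not part of the driver logic; hypothetical
 * values): with nb_pkts = 10, mainpart = 10 & ~3 = 8 and
 * leftover = 10 & 3 = 2, so two tx4() calls fill eight descriptors
 * and two tx1() calls fill the rest. Unrolling by four keeps the
 * descriptor stores pipelined.
 */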
2845 static inline uint16_t
2846 tx_xmit_pkts(struct ice_tx_queue *txq,
2847 struct rte_mbuf **tx_pkts,
2848 uint16_t nb_pkts)
2849 {
2850 volatile struct ice_tx_desc *txr = txq->tx_ring;
2851 uint16_t n = 0;
2853 /*
2854 * Begin scanning the H/W ring for done descriptors when the number
2855 * of available descriptors drops below tx_free_thresh. For each done
2856 * descriptor, free the associated buffer.
2857 */
2858 if (txq->nb_tx_free < txq->tx_free_thresh)
2859 ice_tx_free_bufs(txq);
2861 /* Use available descriptors only */
2862 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2863 if (unlikely(!nb_pkts))
2864 return 0;
2866 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2867 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2868 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2869 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2870 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2871 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2872 ICE_TXD_QW1_CMD_S);
2873 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2874 txq->tx_tail = 0;
2875 }
2877 /* Fill hardware descriptor ring with mbuf data */
2878 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2879 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2881 /* Determine whether the RS bit needs to be set */
2882 if (txq->tx_tail > txq->tx_next_rs) {
2883 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2884 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2885 ICE_TXD_QW1_CMD_S);
2886 txq->tx_next_rs =
2887 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2888 if (txq->tx_next_rs >= txq->nb_tx_desc)
2889 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2892 if (txq->tx_tail >= txq->nb_tx_desc)
2893 txq->tx_tail = 0;
2895 /* Update the tx tail register */
2896 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2898 return nb_pkts;
2899 }
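/*
 * Illustrative note (not part of the driver logic; hypothetical
 * values): RS bits are requested only every tx_rs_thresh descriptors
 * (at tx_next_rs) rather than per packet, so completions arrive in
 * batches. Assuming nb_tx_desc = 512 and tx_rs_thresh = 32, at most
 * 512 / 32 = 16 descriptors carry a pending RS bit at any time.
 */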
2902 ice_xmit_pkts_simple(void *tx_queue,
2903 struct rte_mbuf **tx_pkts,
2904 uint16_t nb_pkts)
2905 {
2906 uint16_t nb_tx = 0;
2908 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2909 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2910 tx_pkts, nb_pkts);
2912 while (nb_pkts) {
2913 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2914 ICE_TX_MAX_BURST);
2916 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2917 &tx_pkts[nb_tx], num);
2918 nb_tx = (uint16_t)(nb_tx + ret);
2919 nb_pkts = (uint16_t)(nb_pkts - ret);
2920 if (ret < num)
2921 break;
2922 }
2924 return nb_tx;
2925 }
2928 ice_set_rx_function(struct rte_eth_dev *dev)
2930 PMD_INIT_FUNC_TRACE();
2931 struct ice_adapter *ad =
2932 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2934 struct ice_rx_queue *rxq;
2935 int i;
2936 bool use_avx2 = false;
2938 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2939 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
2940 ad->rx_vec_allowed = true;
2941 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2942 rxq = dev->data->rx_queues[i];
2943 if (rxq && ice_rxq_vec_setup(rxq)) {
2944 ad->rx_vec_allowed = false;
2949 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2950 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2951 use_avx2 = true;
2953 } else {
2954 ad->rx_vec_allowed = false;
2955 }
2956 }
2958 if (ad->rx_vec_allowed) {
2959 if (dev->data->scattered_rx) {
2961 "Using %sVector Scattered Rx (port %d).",
2962 use_avx2 ? "avx2 " : "",
2963 dev->data->port_id);
2964 dev->rx_pkt_burst = use_avx2 ?
2965 ice_recv_scattered_pkts_vec_avx2 :
2966 ice_recv_scattered_pkts_vec;
2967 } else {
2968 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2969 use_avx2 ? "avx2 " : "",
2970 dev->data->port_id);
2971 dev->rx_pkt_burst = use_avx2 ?
2972 ice_recv_pkts_vec_avx2 :
2973 ice_recv_pkts_vec;
2974 }
2975 return;
2976 }
2980 if (dev->data->scattered_rx) {
2981 /* Set the non-LRO scattered function */
2983 "Using a Scattered function on port %d.",
2984 dev->data->port_id);
2985 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2986 } else if (ad->rx_bulk_alloc_allowed) {
2988 "Rx Burst Bulk Alloc Preconditions are "
2989 "satisfied. Rx Burst Bulk Alloc function "
2990 "will be used on port %d.",
2991 dev->data->port_id);
2992 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2995 "Rx Burst Bulk Alloc Preconditions are not "
2996 "satisfied, Normal Rx will be used on port %d.",
2997 dev->data->port_id);
2998 dev->rx_pkt_burst = ice_recv_pkts;
3002 static const struct {
3003 eth_rx_burst_t pkt_burst;
3004 const char *info;
3005 } ice_rx_burst_infos[] = {
3006 { ice_recv_scattered_pkts, "Scalar Scattered" },
3007 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3008 { ice_recv_pkts, "Scalar" },
3010 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3011 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3012 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3013 { ice_recv_pkts_vec, "Vector SSE" },
3018 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3019 struct rte_eth_burst_mode *mode)
3020 {
3021 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3022 int ret = -EINVAL;
3023 unsigned int i;
3025 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3026 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3027 snprintf(mode->info, sizeof(mode->info), "%s",
3028 ice_rx_burst_infos[i].info);
3029 ret = 0;
3030 break;
3031 }
3032 }
3034 return ret;
3035 }
3038 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3040 struct ice_adapter *ad =
3041 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3043 /* Use a simple Tx queue if possible (only fast free is allowed) */
3044 ad->tx_simple_allowed =
3045 (txq->offloads ==
3046 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3047 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3049 if (ad->tx_simple_allowed)
3050 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3051 txq->queue_id);
3052 else
3053 PMD_INIT_LOG(DEBUG,
3054 "Simple Tx can NOT be enabled on Tx queue %u.",
3055 txq->queue_id);
3058 /*********************************************************************
3059 *
3060 *  TX prep functions
3061 *
3062 **********************************************************************/
3063 /* The default values of TSO MSS */
3064 #define ICE_MIN_TSO_MSS 64
3065 #define ICE_MAX_TSO_MSS 9728
3066 #define ICE_MAX_TSO_FRAME_SIZE 262144
3068 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3069 uint16_t nb_pkts)
3070 {
3071 int i, ret;
3072 uint64_t ol_flags;
3073 struct rte_mbuf *m;
3075 for (i = 0; i < nb_pkts; i++) {
3076 m = tx_pkts[i];
3077 ol_flags = m->ol_flags;
3079 if (ol_flags & PKT_TX_TCP_SEG &&
3080 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3081 m->tso_segsz > ICE_MAX_TSO_MSS ||
3082 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3084 * Packets with an MSS outside the supported range
3085 * are considered malicious.
3086 */
3087 rte_errno = EINVAL;
3088 return i;
3089 }
3090 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3091 ret = rte_validate_tx_offload(m);
3092 if (ret != 0) {
3093 rte_errno = -ret;
3094 return i;
3095 }
3096 #endif
3097 ret = rte_net_intel_cksum_prepare(m);
3098 if (ret != 0) {
3099 rte_errno = -ret;
3100 return i;
3101 }
3102 }
3104 return i;
3105 }
3107 ice_set_tx_function(struct rte_eth_dev *dev)
3109 struct ice_adapter *ad =
3110 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3112 struct ice_tx_queue *txq;
3113 int i;
3114 bool use_avx2 = false;
3116 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3117 if (!ice_tx_vec_dev_check(dev)) {
3118 ad->tx_vec_allowed = true;
3119 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3120 txq = dev->data->tx_queues[i];
3121 if (txq && ice_txq_vec_setup(txq)) {
3122 ad->tx_vec_allowed = false;
3127 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3128 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
3129 use_avx2 = true;
3131 } else {
3132 ad->tx_vec_allowed = false;
3133 }
3134 }
3136 if (ad->tx_vec_allowed) {
3137 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3138 use_avx2 ? "avx2 " : "",
3139 dev->data->port_id);
3140 dev->tx_pkt_burst = use_avx2 ?
3141 ice_xmit_pkts_vec_avx2 :
3142 ice_xmit_pkts_vec;
3143 dev->tx_pkt_prepare = NULL;
3144 return;
3145 }
3149 if (ad->tx_simple_allowed) {
3150 PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3151 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3152 dev->tx_pkt_prepare = NULL;
3153 } else {
3154 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3155 dev->tx_pkt_burst = ice_xmit_pkts;
3156 dev->tx_pkt_prepare = ice_prep_pkts;
3160 static const struct {
3161 eth_tx_burst_t pkt_burst;
3162 const char *info;
3163 } ice_tx_burst_infos[] = {
3164 { ice_xmit_pkts_simple, "Scalar Simple" },
3165 { ice_xmit_pkts, "Scalar" },
3167 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3168 { ice_xmit_pkts_vec, "Vector SSE" },
3173 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3174 struct rte_eth_burst_mode *mode)
3175 {
3176 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3177 int ret = -EINVAL;
3178 unsigned int i;
3180 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3181 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3182 snprintf(mode->info, sizeof(mode->info), "%s",
3183 ice_tx_burst_infos[i].info);
3184 ret = 0;
3185 break;
3186 }
3187 }
3189 return ret;
3190 }
3192 /* For the meaning of each value, the hardware datasheet can tell more details.
3193 *
3194 * @note: fix ice_dev_supported_ptypes_get() if any change here.
3195 */
3196 static inline uint32_t
3197 ice_get_default_pkt_type(uint16_t ptype)
3199 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3200 __rte_cache_aligned = {
3203 [1] = RTE_PTYPE_L2_ETHER,
3204 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3205 /* [3] - [5] reserved */
3206 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3207 /* [7] - [10] reserved */
3208 [11] = RTE_PTYPE_L2_ETHER_ARP,
3209 /* [12] - [21] reserved */
3211 /* Non tunneled IPv4 */
3212 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3214 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3215 RTE_PTYPE_L4_NONFRAG,
3216 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3219 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3221 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3223 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3227 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3228 RTE_PTYPE_TUNNEL_IP |
3229 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3230 RTE_PTYPE_INNER_L4_FRAG,
3231 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3232 RTE_PTYPE_TUNNEL_IP |
3233 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3234 RTE_PTYPE_INNER_L4_NONFRAG,
3235 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3236 RTE_PTYPE_TUNNEL_IP |
3237 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3238 RTE_PTYPE_INNER_L4_UDP,
3240 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3241 RTE_PTYPE_TUNNEL_IP |
3242 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3243 RTE_PTYPE_INNER_L4_TCP,
3244 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3245 RTE_PTYPE_TUNNEL_IP |
3246 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3247 RTE_PTYPE_INNER_L4_SCTP,
3248 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3249 RTE_PTYPE_TUNNEL_IP |
3250 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3251 RTE_PTYPE_INNER_L4_ICMP,
3254 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3255 RTE_PTYPE_TUNNEL_IP |
3256 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3257 RTE_PTYPE_INNER_L4_FRAG,
3258 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3259 RTE_PTYPE_TUNNEL_IP |
3260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3261 RTE_PTYPE_INNER_L4_NONFRAG,
3262 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3263 RTE_PTYPE_TUNNEL_IP |
3264 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3265 RTE_PTYPE_INNER_L4_UDP,
3267 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3268 RTE_PTYPE_TUNNEL_IP |
3269 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3270 RTE_PTYPE_INNER_L4_TCP,
3271 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3272 RTE_PTYPE_TUNNEL_IP |
3273 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3274 RTE_PTYPE_INNER_L4_SCTP,
3275 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3276 RTE_PTYPE_TUNNEL_IP |
3277 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3278 RTE_PTYPE_INNER_L4_ICMP,
3280 /* IPv4 --> GRE/Teredo/VXLAN */
3281 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3282 RTE_PTYPE_TUNNEL_GRENAT,
3284 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3285 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3286 RTE_PTYPE_TUNNEL_GRENAT |
3287 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3288 RTE_PTYPE_INNER_L4_FRAG,
3289 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3290 RTE_PTYPE_TUNNEL_GRENAT |
3291 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3292 RTE_PTYPE_INNER_L4_NONFRAG,
3293 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3294 RTE_PTYPE_TUNNEL_GRENAT |
3295 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3296 RTE_PTYPE_INNER_L4_UDP,
3298 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3299 RTE_PTYPE_TUNNEL_GRENAT |
3300 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3301 RTE_PTYPE_INNER_L4_TCP,
3302 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3303 RTE_PTYPE_TUNNEL_GRENAT |
3304 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3305 RTE_PTYPE_INNER_L4_SCTP,
3306 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3307 RTE_PTYPE_TUNNEL_GRENAT |
3308 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3309 RTE_PTYPE_INNER_L4_ICMP,
3311 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3312 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3313 RTE_PTYPE_TUNNEL_GRENAT |
3314 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3315 RTE_PTYPE_INNER_L4_FRAG,
3316 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3317 RTE_PTYPE_TUNNEL_GRENAT |
3318 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3319 RTE_PTYPE_INNER_L4_NONFRAG,
3320 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3321 RTE_PTYPE_TUNNEL_GRENAT |
3322 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3323 RTE_PTYPE_INNER_L4_UDP,
3325 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3326 RTE_PTYPE_TUNNEL_GRENAT |
3327 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3328 RTE_PTYPE_INNER_L4_TCP,
3329 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3330 RTE_PTYPE_TUNNEL_GRENAT |
3331 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3332 RTE_PTYPE_INNER_L4_SCTP,
3333 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3334 RTE_PTYPE_TUNNEL_GRENAT |
3335 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3336 RTE_PTYPE_INNER_L4_ICMP,
3338 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3339 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3340 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3342 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3343 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3344 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3345 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3346 RTE_PTYPE_INNER_L4_FRAG,
3347 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3348 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3349 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3350 RTE_PTYPE_INNER_L4_NONFRAG,
3351 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3352 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3353 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3354 RTE_PTYPE_INNER_L4_UDP,
3356 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3357 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3358 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3359 RTE_PTYPE_INNER_L4_TCP,
3360 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3361 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3362 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3363 RTE_PTYPE_INNER_L4_SCTP,
3364 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3365 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3366 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3367 RTE_PTYPE_INNER_L4_ICMP,
3369 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3370 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3371 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3372 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3373 RTE_PTYPE_INNER_L4_FRAG,
3374 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3375 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3376 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3377 RTE_PTYPE_INNER_L4_NONFRAG,
3378 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3379 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3380 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3381 RTE_PTYPE_INNER_L4_UDP,
3383 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3384 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3385 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3386 RTE_PTYPE_INNER_L4_TCP,
3387 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3388 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3389 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3390 RTE_PTYPE_INNER_L4_SCTP,
3391 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3392 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3393 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3394 RTE_PTYPE_INNER_L4_ICMP,
3395 /* [73] - [87] reserved */
3397 /* Non tunneled IPv6 */
3398 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3400 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3401 RTE_PTYPE_L4_NONFRAG,
3402 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3405 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3407 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3409 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3413 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3414 RTE_PTYPE_TUNNEL_IP |
3415 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3416 RTE_PTYPE_INNER_L4_FRAG,
3417 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3418 RTE_PTYPE_TUNNEL_IP |
3419 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3420 RTE_PTYPE_INNER_L4_NONFRAG,
3421 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3422 RTE_PTYPE_TUNNEL_IP |
3423 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3424 RTE_PTYPE_INNER_L4_UDP,
3426 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3427 RTE_PTYPE_TUNNEL_IP |
3428 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3429 RTE_PTYPE_INNER_L4_TCP,
3430 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3431 RTE_PTYPE_TUNNEL_IP |
3432 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3433 RTE_PTYPE_INNER_L4_SCTP,
3434 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3435 RTE_PTYPE_TUNNEL_IP |
3436 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3437 RTE_PTYPE_INNER_L4_ICMP,
3440 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3441 RTE_PTYPE_TUNNEL_IP |
3442 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3443 RTE_PTYPE_INNER_L4_FRAG,
3444 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3445 RTE_PTYPE_TUNNEL_IP |
3446 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3447 RTE_PTYPE_INNER_L4_NONFRAG,
3448 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3449 RTE_PTYPE_TUNNEL_IP |
3450 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3451 RTE_PTYPE_INNER_L4_UDP,
3452 /* [105] reserved */
3453 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3454 RTE_PTYPE_TUNNEL_IP |
3455 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3456 RTE_PTYPE_INNER_L4_TCP,
3457 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3458 RTE_PTYPE_TUNNEL_IP |
3459 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3460 RTE_PTYPE_INNER_L4_SCTP,
3461 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3462 RTE_PTYPE_TUNNEL_IP |
3463 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3464 RTE_PTYPE_INNER_L4_ICMP,
3466 /* IPv6 --> GRE/Teredo/VXLAN */
3467 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3468 RTE_PTYPE_TUNNEL_GRENAT,
3470 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3471 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3472 RTE_PTYPE_TUNNEL_GRENAT |
3473 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3474 RTE_PTYPE_INNER_L4_FRAG,
3475 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3476 RTE_PTYPE_TUNNEL_GRENAT |
3477 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3478 RTE_PTYPE_INNER_L4_NONFRAG,
3479 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3480 RTE_PTYPE_TUNNEL_GRENAT |
3481 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3482 RTE_PTYPE_INNER_L4_UDP,
3483 /* [113] reserved */
3484 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3485 RTE_PTYPE_TUNNEL_GRENAT |
3486 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3487 RTE_PTYPE_INNER_L4_TCP,
3488 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3489 RTE_PTYPE_TUNNEL_GRENAT |
3490 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3491 RTE_PTYPE_INNER_L4_SCTP,
3492 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3493 RTE_PTYPE_TUNNEL_GRENAT |
3494 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3495 RTE_PTYPE_INNER_L4_ICMP,
3497 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3498 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3499 RTE_PTYPE_TUNNEL_GRENAT |
3500 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3501 RTE_PTYPE_INNER_L4_FRAG,
3502 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3503 RTE_PTYPE_TUNNEL_GRENAT |
3504 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3505 RTE_PTYPE_INNER_L4_NONFRAG,
3506 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3507 RTE_PTYPE_TUNNEL_GRENAT |
3508 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3509 RTE_PTYPE_INNER_L4_UDP,
3510 /* [120] reserved */
3511 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3512 RTE_PTYPE_TUNNEL_GRENAT |
3513 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3514 RTE_PTYPE_INNER_L4_TCP,
3515 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3516 RTE_PTYPE_TUNNEL_GRENAT |
3517 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3518 RTE_PTYPE_INNER_L4_SCTP,
3519 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3520 RTE_PTYPE_TUNNEL_GRENAT |
3521 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3522 RTE_PTYPE_INNER_L4_ICMP,
3524 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3525 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3526 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3528 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3529 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3530 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3531 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3532 RTE_PTYPE_INNER_L4_FRAG,
3533 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3534 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3535 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3536 RTE_PTYPE_INNER_L4_NONFRAG,
3537 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3538 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3539 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3540 RTE_PTYPE_INNER_L4_UDP,
3541 /* [128] reserved */
3542 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3543 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3544 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3545 RTE_PTYPE_INNER_L4_TCP,
3546 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3547 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3548 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3549 RTE_PTYPE_INNER_L4_SCTP,
3550 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3551 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3552 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3553 RTE_PTYPE_INNER_L4_ICMP,
3555 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3556 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3557 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3558 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3559 RTE_PTYPE_INNER_L4_FRAG,
3560 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3561 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3562 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3563 RTE_PTYPE_INNER_L4_NONFRAG,
3564 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3565 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3566 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3567 RTE_PTYPE_INNER_L4_UDP,
3568 /* [135] reserved */
3569 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3570 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3571 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3572 RTE_PTYPE_INNER_L4_TCP,
3573 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3574 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3575 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3576 RTE_PTYPE_INNER_L4_SCTP,
3577 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3578 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3579 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3580 RTE_PTYPE_INNER_L4_ICMP,
3581 /* [139] - [299] reserved */
3584 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3585 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3587 /* PPPoE --> IPv4 */
3588 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3589 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3591 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3592 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3593 RTE_PTYPE_L4_NONFRAG,
3594 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3595 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3597 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3598 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3600 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3601 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3603 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3604 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3607 /* PPPoE --> IPv6 */
3608 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3609 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3611 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3612 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3613 RTE_PTYPE_L4_NONFRAG,
3614 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3615 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3617 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3618 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3620 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3621 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3623 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3624 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3626 /* [314] - [324] reserved */
3628 /* IPv4/IPv6 --> GTPC/GTPU */
3629 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3630 RTE_PTYPE_TUNNEL_GTPC,
3631 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3632 RTE_PTYPE_TUNNEL_GTPC,
3633 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3634 RTE_PTYPE_TUNNEL_GTPC,
3635 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3636 RTE_PTYPE_TUNNEL_GTPC,
3637 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3638 RTE_PTYPE_TUNNEL_GTPU,
3639 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3640 RTE_PTYPE_TUNNEL_GTPU,
3642 /* IPv4 --> GTPU --> IPv4 */
3643 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3644 RTE_PTYPE_TUNNEL_GTPU |
3645 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3646 RTE_PTYPE_INNER_L4_FRAG,
3647 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3648 RTE_PTYPE_TUNNEL_GTPU |
3649 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3650 RTE_PTYPE_INNER_L4_NONFRAG,
3651 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3652 RTE_PTYPE_TUNNEL_GTPU |
3653 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3654 RTE_PTYPE_INNER_L4_UDP,
3655 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3656 RTE_PTYPE_TUNNEL_GTPU |
3657 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3658 RTE_PTYPE_INNER_L4_TCP,
3659 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3660 RTE_PTYPE_TUNNEL_GTPU |
3661 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3662 RTE_PTYPE_INNER_L4_ICMP,
3664 /* IPv6 --> GTPU --> IPv4 */
3665 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3666 RTE_PTYPE_TUNNEL_GTPU |
3667 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3668 RTE_PTYPE_INNER_L4_FRAG,
3669 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3670 RTE_PTYPE_TUNNEL_GTPU |
3671 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3672 RTE_PTYPE_INNER_L4_NONFRAG,
3673 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3674 RTE_PTYPE_TUNNEL_GTPU |
3675 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3676 RTE_PTYPE_INNER_L4_UDP,
3677 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3678 RTE_PTYPE_TUNNEL_GTPU |
3679 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3680 RTE_PTYPE_INNER_L4_TCP,
3681 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3682 RTE_PTYPE_TUNNEL_GTPU |
3683 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3684 RTE_PTYPE_INNER_L4_ICMP,
3686 /* IPv4 --> GTPU --> IPv6 */
3687 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3688 RTE_PTYPE_TUNNEL_GTPU |
3689 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3690 RTE_PTYPE_INNER_L4_FRAG,
3691 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3692 RTE_PTYPE_TUNNEL_GTPU |
3693 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3694 RTE_PTYPE_INNER_L4_NONFRAG,
3695 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3696 RTE_PTYPE_TUNNEL_GTPU |
3697 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_INNER_L4_UDP,
3699 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3700 RTE_PTYPE_TUNNEL_GTPU |
3701 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3702 RTE_PTYPE_INNER_L4_TCP,
3703 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3704 RTE_PTYPE_TUNNEL_GTPU |
3705 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3706 RTE_PTYPE_INNER_L4_ICMP,
3708 /* IPv6 --> GTPU --> IPv6 */
3709 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3710 RTE_PTYPE_TUNNEL_GTPU |
3711 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3712 RTE_PTYPE_INNER_L4_FRAG,
3713 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3714 RTE_PTYPE_TUNNEL_GTPU |
3715 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3716 RTE_PTYPE_INNER_L4_NONFRAG,
3717 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3718 RTE_PTYPE_TUNNEL_GTPU |
3719 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3720 RTE_PTYPE_INNER_L4_UDP,
3721 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3722 RTE_PTYPE_TUNNEL_GTPU |
3723 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3724 RTE_PTYPE_INNER_L4_TCP,
3725 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3726 RTE_PTYPE_TUNNEL_GTPU |
3727 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3728 RTE_PTYPE_INNER_L4_ICMP,
3729 /* All others reserved */
3732 return type_table[ptype];
3736 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3738 struct ice_adapter *ad =
3739 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3740 int i;
3742 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3743 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
3744 }
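/*
 * Usage sketch (not part of the driver logic; hypothetical values):
 * once the table is populated, the Rx paths translate the 10-bit
 * hardware ptype with a single load, e.g.
 *
 *     uint32_t sw_ptype = ad->ptype_tbl[23];
 *     // == RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 *     //    RTE_PTYPE_L4_NONFRAG
 *
 * Reserved indexes resolve to 0, i.e. RTE_PTYPE_UNKNOWN.
 */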
3746 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
3747 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
3748 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3749 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3750 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3752 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
3753 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
3754 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3755 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
3756 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
3757 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
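/*
 * Illustrative note (not part of the driver logic): these masks
 * decode the write-back qword1 of a programming status descriptor.
 * For example, a qword1 with the FAIL bit (bit 4) set and the PROGID
 * field (bits 2:1) equal to ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD
 * indicates that adding a Flow Director rule failed.
 */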
3759 /*
3760 * Check the programming status descriptor in the Rx queue.
3761 * This is done after a Flow Director filter has been programmed
3762 * on the Tx queue.
3763 */
3765 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3767 volatile union ice_32byte_rx_desc *rxdp;
3768 uint64_t qword1;
3769 uint32_t rx_status;
3770 uint32_t error;
3771 uint32_t id;
3772 int ret = -EAGAIN;
3774 rxdp = (volatile union ice_32byte_rx_desc *)
3775 (&rxq->rx_ring[rxq->rx_tail]);
3776 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
3777 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
3778 >> ICE_RXD_QW1_STATUS_S;
3780 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
3781 ret = 0;
3782 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
3783 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
3784 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
3785 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
3786 if (error) {
3787 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
3788 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
3789 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
3790 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
3791 ret = -EINVAL;
3792 }
3794 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
3795 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
3796 if (error) {
3797 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
3798 ret = -EINVAL;
3799 }
3801 rxdp->wb.qword1.status_error_len = 0;
3802 rxq->rx_tail++;
3803 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
3804 rxq->rx_tail = 0;
3805 if (rxq->rx_tail == 0)
3806 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3807 else
3808 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
3809 }
3811 return ret;
3812 }
3814 #define ICE_FDIR_MAX_WAIT_US 10000
3817 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3819 struct ice_tx_queue *txq = pf->fdir.txq;
3820 struct ice_rx_queue *rxq = pf->fdir.rxq;
3821 volatile struct ice_fltr_desc *fdirdp;
3822 volatile struct ice_tx_desc *txdp;
3823 uint32_t td_cmd;
3824 uint16_t i;
3826 fdirdp = (volatile struct ice_fltr_desc *)
3827 (&txq->tx_ring[txq->tx_tail]);
3828 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3829 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3831 txdp = &txq->tx_ring[txq->tx_tail + 1];
3832 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3833 td_cmd = ICE_TX_DESC_CMD_EOP |
3834 ICE_TX_DESC_CMD_RS |
3835 ICE_TX_DESC_CMD_DUMMY;
3837 txdp->cmd_type_offset_bsz =
3838 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
3840 txq->tx_tail += 2;
3841 if (txq->tx_tail >= txq->nb_tx_desc)
3842 txq->tx_tail = 0;
3843 /* Update the tx tail register */
3844 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
3845 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3846 if ((txdp->cmd_type_offset_bsz &
3847 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3848 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3849 break;
3850 rte_delay_us(1);
3851 }
3852 if (i >= ICE_FDIR_MAX_WAIT_US) {
3853 PMD_DRV_LOG(ERR,
3854 "Failed to program FDIR filter: timed out waiting for DD on the Tx queue.");
3855 return -ETIMEDOUT;
3856 }
3858 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
3859 int ret;
3861 ret = ice_check_fdir_programming_status(rxq);
3862 if (ret == -EAGAIN)
3863 rte_delay_us(1);
3864 else
3865 return ret;
3866 }
3868 PMD_DRV_LOG(ERR,
3869 "Failed to program FDIR filter: programming status not reported in time.");
3870 return -ETIMEDOUT;
3871 }