1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
8 #include "rte_pmd_ice.h"
11 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
15 PKT_TX_OUTER_IP_CKSUM)
17 /* Offset of mbuf dynamic field for protocol extraction data */
18 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
20 /* Mask of mbuf dynamic flags for protocol extraction type */
21 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
22 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
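/* Look up the mbuf dynamic flag registered for the protocol extraction type
 * carried by the given RXDID; returns 0 if no flag is associated with it.
 */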
27 static inline uint64_t
28 ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
30 static uint64_t *ol_flag_map[] = {
31 [ICE_RXDID_COMMS_AUX_VLAN] =
32 &rte_net_ice_dynflag_proto_xtr_vlan_mask,
33 [ICE_RXDID_COMMS_AUX_IPV4] =
34 &rte_net_ice_dynflag_proto_xtr_ipv4_mask,
35 [ICE_RXDID_COMMS_AUX_IPV6] =
36 &rte_net_ice_dynflag_proto_xtr_ipv6_mask,
37 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
38 &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
39 [ICE_RXDID_COMMS_AUX_TCP] =
40 &rte_net_ice_dynflag_proto_xtr_tcp_mask,
44 ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
46 return ol_flag != NULL ? *ol_flag : 0ULL;
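/* Map a PROTO_XTR_* type to the RXDID programmed into the Rx queue context;
 * unknown types fall back to ICE_RXDID_COMMS_GENERIC.
 */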
50 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
52 static uint8_t rxdid_map[] = {
53 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_GENERIC,
54 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
55 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
56 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
57 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
58 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
61 return xtr_type < RTE_DIM(rxdid_map) ?
62 rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
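/* Program the hardware context of one Rx queue: buffer sizes, maximum packet
 * length, descriptor format (RXDID) and the tail register.
 */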
65 static enum ice_status
66 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
68 struct ice_vsi *vsi = rxq->vsi;
69 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
70 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
71 struct ice_rlan_ctx rx_ctx;
73 uint16_t buf_size, len;
74 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
75 uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
78 /* Set buffer size as header split is disabled. */
79 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
80 RTE_PKTMBUF_HEADROOM);
82 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
83 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
84 rxq->max_pkt_len = RTE_MIN(len,
85 dev->data->dev_conf.rxmode.max_rx_pkt_len);
87 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
88 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
89 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
90 PMD_DRV_LOG(ERR, "maximum packet length must "
91 "be larger than %u and smaller than %u,"
92 "as jumbo frame is enabled",
93 (uint32_t)RTE_ETHER_MAX_LEN,
94 (uint32_t)ICE_FRAME_SIZE_MAX);
98 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
99 rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
100 PMD_DRV_LOG(ERR, "maximum packet length must be "
101 "larger than %u and smaller than %u, "
102 "as jumbo frame is disabled",
103 (uint32_t)RTE_ETHER_MIN_LEN,
104 (uint32_t)RTE_ETHER_MAX_LEN);
109 memset(&rx_ctx, 0, sizeof(rx_ctx));
111 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
112 rx_ctx.qlen = rxq->nb_rx_desc;
113 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
114 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
115 rx_ctx.dtype = 0; /* No Header Split mode */
116 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
117 rx_ctx.dsize = 1; /* 32B descriptors */
119 rx_ctx.rxmax = rxq->max_pkt_len;
120 /* TPH: Transaction Layer Packet (TLP) processing hints */
121 rx_ctx.tphrdesc_ena = 1;
122 rx_ctx.tphwdesc_ena = 1;
123 rx_ctx.tphdata_ena = 1;
124 rx_ctx.tphhead_ena = 1;
125 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
126 * When the number of free descriptors goes below the lrxqthresh,
127 * an immediate interrupt is triggered.
129 rx_ctx.lrxqthresh = 2;
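/* i.e. an interrupt fires once fewer than 2 * 64 = 128 descriptors are free */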
130 /* Default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
133 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
135 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
137 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
138 rxq->port_id, rxq->queue_id, rxdid);
140 /* Enable Flexible Descriptors in the queue context which
141 * allows this driver to select a specific receive descriptor format
143 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
144 QRXFLXP_CNTXT_RXDID_IDX_M;
146 /* Increase context priority to pick up the profile ID;
147 * default is 0x01; set to 0x03 to ensure the profile
148 * is programmed even if the previous context has the same priority
150 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
151 QRXFLXP_CNTXT_RXDID_PRIO_M;
153 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
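/* Program the LAN Rx queue context: clear any previous context first, then
 * write the one built above.
 */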
155 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
157 PMD_DRV_LOG(ERR, "Failed to clear LAN Rx queue (%u) context",
161 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
163 PMD_DRV_LOG(ERR, "Failed to write LAN Rx queue (%u) context",
168 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
169 RTE_PKTMBUF_HEADROOM);
171 /* Check if scattered RX needs to be used. */
172 if (rxq->max_pkt_len > buf_size)
173 dev->data->scattered_rx = 1;
175 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
177 /* Init the Rx tail register */
178 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
183 /* Allocate mbufs for all descriptors in rx queue */
185 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
187 struct ice_rx_entry *rxe = rxq->sw_ring;
191 for (i = 0; i < rxq->nb_rx_desc; i++) {
192 volatile union ice_rx_flex_desc *rxd;
193 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
195 if (unlikely(!mbuf)) {
196 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
200 rte_mbuf_refcnt_set(mbuf, 1);
202 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
204 mbuf->port = rxq->port_id;
207 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
209 rxd = &rxq->rx_ring[i];
210 rxd->read.pkt_addr = dma_addr;
211 rxd->read.hdr_addr = 0;
212 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
222 /* Free all mbufs for descriptors in rx queue */
224 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
228 if (!rxq || !rxq->sw_ring) {
229 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
233 for (i = 0; i < rxq->nb_rx_desc; i++) {
234 if (rxq->sw_ring[i].mbuf) {
235 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
236 rxq->sw_ring[i].mbuf = NULL;
239 if (rxq->rx_nb_avail == 0)
241 for (i = 0; i < rxq->rx_nb_avail; i++)
242 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
244 rxq->rx_nb_avail = 0;
247 /* Turn an Rx queue on or off
248 * @q_idx: queue index in PF scope
249 * @on: true to turn the queue on, false to turn it off
252 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
257 /* QRX_CTRL = QRX_ENA */
258 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
261 if (reg & QRX_CTRL_QENA_STAT_M)
262 return 0; /* Already on, skip */
263 reg |= QRX_CTRL_QENA_REQ_M;
265 if (!(reg & QRX_CTRL_QENA_STAT_M))
266 return 0; /* Already off, skip */
267 reg &= ~QRX_CTRL_QENA_REQ_M;
270 /* Write the register */
271 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
272 /* Check the result. QENA_STAT is expected to follow
273 * QENA_REQ within no more than 10 us.
274 * TODO: need to change the wait counter later
276 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
277 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
278 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
280 if ((reg & QRX_CTRL_QENA_REQ_M) &&
281 (reg & QRX_CTRL_QENA_STAT_M))
284 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
285 !(reg & QRX_CTRL_QENA_STAT_M))
290 /* Check if it timed out */
291 if (j >= ICE_CHK_Q_ENA_COUNT) {
292 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
293 (on ? "enable" : "disable"), q_idx);
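/* Preconditions for the bulk-allocation Rx burst path: rx_free_thresh must be
 * at least ICE_RX_MAX_BURST, smaller than the ring size, and a divisor of the
 * ring size.
 */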
301 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
305 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
306 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
307 "rxq->rx_free_thresh=%d, "
308 "ICE_RX_MAX_BURST=%d",
309 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
311 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
312 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
313 "rxq->rx_free_thresh=%d, "
314 "rxq->nb_rx_desc=%d",
315 rxq->rx_free_thresh, rxq->nb_rx_desc);
317 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
318 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
319 "rxq->nb_rx_desc=%d, "
320 "rxq->rx_free_thresh=%d",
321 rxq->nb_rx_desc, rxq->rx_free_thresh);
328 /* reset fields in ice_rx_queue back to default */
330 ice_reset_rx_queue(struct ice_rx_queue *rxq)
336 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
340 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
342 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
343 ((volatile char *)rxq->rx_ring)[i] = 0;
345 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
346 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
347 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
349 rxq->rx_nb_avail = 0;
350 rxq->rx_next_avail = 0;
351 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
355 rxq->pkt_first_seg = NULL;
356 rxq->pkt_last_seg = NULL;
358 rxq->rxrearm_start = 0;
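/* Start an Rx queue: program the hardware context, fill the ring with mbufs,
 * write the tail register and enable the queue.
 */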
363 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
365 struct ice_rx_queue *rxq;
367 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
369 PMD_INIT_FUNC_TRACE();
371 if (rx_queue_id >= dev->data->nb_rx_queues) {
372 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
373 rx_queue_id, dev->data->nb_rx_queues);
377 rxq = dev->data->rx_queues[rx_queue_id];
378 if (!rxq || !rxq->q_set) {
379 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
384 err = ice_program_hw_rx_queue(rxq);
386 PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
391 err = ice_alloc_rx_queue_mbufs(rxq);
393 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
397 /* Init the RX tail register. */
398 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
400 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
402 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
405 rxq->rx_rel_mbufs(rxq);
406 ice_reset_rx_queue(rxq);
410 dev->data->rx_queue_state[rx_queue_id] =
411 RTE_ETH_QUEUE_STATE_STARTED;
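/* Stop an Rx queue: disable it in hardware, then release its mbufs and reset
 * the software state.
 */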
417 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
419 struct ice_rx_queue *rxq;
421 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
423 if (rx_queue_id < dev->data->nb_rx_queues) {
424 rxq = dev->data->rx_queues[rx_queue_id];
426 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
428 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
432 rxq->rx_rel_mbufs(rxq);
433 ice_reset_rx_queue(rxq);
434 dev->data->rx_queue_state[rx_queue_id] =
435 RTE_ETH_QUEUE_STATE_STOPPED;
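/* Start a Tx queue: build the Tx LAN queue context, add the queue to the
 * scheduler with ice_ena_vsi_txq() and record the returned TEID.
 */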
442 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
444 struct ice_tx_queue *txq;
448 struct ice_aqc_add_tx_qgrp txq_elem;
449 struct ice_tlan_ctx tx_ctx;
451 PMD_INIT_FUNC_TRACE();
453 if (tx_queue_id >= dev->data->nb_tx_queues) {
454 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
455 tx_queue_id, dev->data->nb_tx_queues);
459 txq = dev->data->tx_queues[tx_queue_id];
460 if (!txq || !txq->q_set) {
461 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
467 hw = ICE_VSI_TO_HW(vsi);
469 memset(&txq_elem, 0, sizeof(txq_elem));
470 memset(&tx_ctx, 0, sizeof(tx_ctx));
471 txq_elem.num_txqs = 1;
472 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
474 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
475 tx_ctx.qlen = txq->nb_tx_desc;
476 tx_ctx.pf_num = hw->pf_id;
477 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
478 tx_ctx.src_vsi = vsi->vsi_id;
479 tx_ctx.port_num = hw->port_info->lport;
480 tx_ctx.tso_ena = 1; /* tso enable */
481 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
482 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
484 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
487 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
489 /* Init the Tx tail register */
490 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
492 /* FIXME: we assume TC is always 0 here */
493 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
494 &txq_elem, sizeof(txq_elem), NULL);
496 PMD_DRV_LOG(ERR, "Failed to add LAN txq");
499 /* store the schedule node id */
500 txq->q_teid = txq_elem.txqs[0].q_teid;
502 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
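/* Program the Rx queue used by the flow director: fixed 1024-byte buffers and
 * the legacy descriptor format (ICE_RXDID_LEGACY_1).
 */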
506 static enum ice_status
507 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
509 struct ice_vsi *vsi = rxq->vsi;
510 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
511 uint32_t rxdid = ICE_RXDID_LEGACY_1;
512 struct ice_rlan_ctx rx_ctx;
517 rxq->rx_buf_len = 1024;
519 memset(&rx_ctx, 0, sizeof(rx_ctx));
521 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
522 rx_ctx.qlen = rxq->nb_rx_desc;
523 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
524 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
525 rx_ctx.dtype = 0; /* No Header Split mode */
526 rx_ctx.dsize = 1; /* 32B descriptors */
527 rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
528 /* TPH: Transaction Layer Packet (TLP) processing hints */
529 rx_ctx.tphrdesc_ena = 1;
530 rx_ctx.tphwdesc_ena = 1;
531 rx_ctx.tphdata_ena = 1;
532 rx_ctx.tphhead_ena = 1;
533 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
534 * When the number of free descriptors goes below the lrxqthresh,
535 * an immediate interrupt is triggered.
537 rx_ctx.lrxqthresh = 2;
538 /* Default to 32-byte descriptors; VLAN tag extracted to L2TAG2 (1st) */
541 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
543 /* Enable Flexible Descriptors in the queue context which
544 * allows this driver to select a specific receive descriptor format
546 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
547 QRXFLXP_CNTXT_RXDID_IDX_M;
549 /* Increase context priority to pick up the profile ID;
550 * default is 0x01; set to 0x03 to ensure the profile
551 * is programmed even if the previous context has the same priority
553 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
554 QRXFLXP_CNTXT_RXDID_PRIO_M;
556 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
558 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
560 PMD_DRV_LOG(ERR, "Failed to clear LAN Rx queue (%u) context",
564 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
566 PMD_DRV_LOG(ERR, "Failed to write LAN Rx queue (%u) context",
571 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
573 /* Init the Rx tail register */
574 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
580 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
582 struct ice_rx_queue *rxq;
584 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
585 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
587 PMD_INIT_FUNC_TRACE();
590 if (!rxq || !rxq->q_set) {
591 PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available or not set up",
596 err = ice_fdir_program_hw_rx_queue(rxq);
598 PMD_DRV_LOG(ERR, "Failed to program FDIR RX queue %u",
603 /* Init the RX tail register. */
604 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
606 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
608 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
611 ice_reset_rx_queue(rxq);
619 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
621 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
622 struct ice_tx_queue *txq;
626 struct ice_aqc_add_tx_qgrp txq_elem;
627 struct ice_tlan_ctx tx_ctx;
629 PMD_INIT_FUNC_TRACE();
632 if (!txq || !txq->q_set) {
633 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up",
639 hw = ICE_VSI_TO_HW(vsi);
641 memset(&txq_elem, 0, sizeof(txq_elem));
642 memset(&tx_ctx, 0, sizeof(tx_ctx));
643 txq_elem.num_txqs = 1;
644 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
646 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
647 tx_ctx.qlen = txq->nb_tx_desc;
648 tx_ctx.pf_num = hw->pf_id;
649 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
650 tx_ctx.src_vsi = vsi->vsi_id;
651 tx_ctx.port_num = hw->port_info->lport;
652 tx_ctx.tso_ena = 1; /* tso enable */
653 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
654 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
656 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
659 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
661 /* Init the Tx tail register */
662 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
664 /* FIXME: we assume TC is always 0 here */
665 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
666 &txq_elem, sizeof(txq_elem), NULL);
668 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
671 /* store the schedule node id */
672 txq->q_teid = txq_elem.txqs[0].q_teid;
677 /* Free all mbufs for descriptors in tx queue */
679 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
683 if (!txq || !txq->sw_ring) {
684 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
688 for (i = 0; i < txq->nb_tx_desc; i++) {
689 if (txq->sw_ring[i].mbuf) {
690 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
691 txq->sw_ring[i].mbuf = NULL;
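/* Reset a Tx queue to its initial state: zero the descriptor ring, mark every
 * descriptor as done and relink the software ring entries.
 */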
697 ice_reset_tx_queue(struct ice_tx_queue *txq)
699 struct ice_tx_entry *txe;
700 uint16_t i, prev, size;
703 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
708 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
709 for (i = 0; i < size; i++)
710 ((volatile char *)txq->tx_ring)[i] = 0;
712 prev = (uint16_t)(txq->nb_tx_desc - 1);
713 for (i = 0; i < txq->nb_tx_desc; i++) {
714 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
716 txd->cmd_type_offset_bsz =
717 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
720 txe[prev].next_id = i;
724 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
725 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
730 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
731 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
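/* Stop a Tx queue: remove it from the scheduler with ice_dis_vsi_txq(), then
 * release its mbufs and reset the software state.
 */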
735 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
737 struct ice_tx_queue *txq;
738 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
739 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
740 struct ice_vsi *vsi = pf->main_vsi;
741 enum ice_status status;
744 uint16_t q_handle = tx_queue_id;
746 if (tx_queue_id >= dev->data->nb_tx_queues) {
747 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
748 tx_queue_id, dev->data->nb_tx_queues);
752 txq = dev->data->tx_queues[tx_queue_id];
754 PMD_DRV_LOG(ERR, "TX queue %u is not available",
759 q_ids[0] = txq->reg_idx;
760 q_teids[0] = txq->q_teid;
762 /* FIXME: we assume TC is always 0 here */
763 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
764 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
765 if (status != ICE_SUCCESS) {
766 PMD_DRV_LOG(DEBUG, "Failed to disable LAN Tx queue");
770 txq->tx_rel_mbufs(txq);
771 ice_reset_tx_queue(txq);
772 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
778 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
780 struct ice_rx_queue *rxq;
782 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
783 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
787 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
789 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
793 rxq->rx_rel_mbufs(rxq);
799 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
801 struct ice_tx_queue *txq;
802 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
803 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
804 struct ice_vsi *vsi = pf->main_vsi;
805 enum ice_status status;
808 uint16_t q_handle = tx_queue_id;
812 PMD_DRV_LOG(ERR, "TX queue %u is not available",
818 q_ids[0] = txq->reg_idx;
819 q_teids[0] = txq->q_teid;
821 /* FIXME: we assume TC is always 0 here */
822 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
823 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
824 if (status != ICE_SUCCESS) {
825 PMD_DRV_LOG(DEBUG, "Failed to disable LAN Tx queue");
829 txq->tx_rel_mbufs(txq);
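/* Set up an Rx queue: validate the descriptor count, allocate the queue
 * structure, the DMA descriptor ring and the software ring, and check whether
 * the bulk-allocation Rx path can be used.
 */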
835 ice_rx_queue_setup(struct rte_eth_dev *dev,
838 unsigned int socket_id,
839 const struct rte_eth_rxconf *rx_conf,
840 struct rte_mempool *mp)
842 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
843 struct ice_adapter *ad =
844 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
845 struct ice_vsi *vsi = pf->main_vsi;
846 struct ice_rx_queue *rxq;
847 const struct rte_memzone *rz;
850 int use_def_burst_func = 1;
852 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
853 nb_desc > ICE_MAX_RING_DESC ||
854 nb_desc < ICE_MIN_RING_DESC) {
855 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
860 /* Free memory if needed */
861 if (dev->data->rx_queues[queue_idx]) {
862 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
863 dev->data->rx_queues[queue_idx] = NULL;
866 /* Allocate the rx queue data structure */
867 rxq = rte_zmalloc_socket(NULL,
868 sizeof(struct ice_rx_queue),
872 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
873 "rx queue data structure");
877 rxq->nb_rx_desc = nb_desc;
878 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
879 rxq->queue_id = queue_idx;
881 rxq->reg_idx = vsi->base_queue + queue_idx;
882 rxq->port_id = dev->data->port_id;
883 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
884 rxq->crc_len = RTE_ETHER_CRC_LEN;
888 rxq->drop_en = rx_conf->rx_drop_en;
890 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
891 rxq->proto_xtr = pf->proto_xtr != NULL ?
892 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
894 /* Allocate the maximum number of RX ring hardware descriptors. */
895 len = ICE_MAX_RING_DESC;
898 * Allocating a little more memory because vectorized/bulk_alloc Rx
899 * functions don't check boundaries each time.
901 len += ICE_RX_MAX_BURST;
903 /* Size the ring for the maximum number of RX hardware descriptors. */
904 ring_size = sizeof(union ice_rx_flex_desc) * len;
905 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
906 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
907 ring_size, ICE_RING_BASE_ALIGN,
910 ice_rx_queue_release(rxq);
911 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
915 /* Zero all the descriptors in the ring. */
916 memset(rz->addr, 0, ring_size);
918 rxq->rx_ring_dma = rz->iova;
919 rxq->rx_ring = rz->addr;
921 /* always reserve more for bulk alloc */
922 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
924 /* Allocate the software ring. */
925 rxq->sw_ring = rte_zmalloc_socket(NULL,
926 sizeof(struct ice_rx_entry) * len,
930 ice_rx_queue_release(rxq);
931 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
935 ice_reset_rx_queue(rxq);
937 dev->data->rx_queues[queue_idx] = rxq;
938 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
940 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
942 if (!use_def_burst_func) {
943 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
944 "satisfied. Rx Burst Bulk Alloc function will be "
945 "used on port=%d, queue=%d.",
946 rxq->port_id, rxq->queue_id);
948 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
949 "not satisfied, Scattered Rx is requested. "
950 "on port=%d, queue=%d.",
951 rxq->port_id, rxq->queue_id);
952 ad->rx_bulk_alloc_allowed = false;
959 ice_rx_queue_release(void *rxq)
961 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
964 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
969 rte_free(q->sw_ring);
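/* Set up a Tx queue: validate the descriptor count and the RS/free thresholds,
 * then allocate the queue structure, the DMA descriptor ring and the software
 * ring.
 */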
974 ice_tx_queue_setup(struct rte_eth_dev *dev,
977 unsigned int socket_id,
978 const struct rte_eth_txconf *tx_conf)
980 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
981 struct ice_vsi *vsi = pf->main_vsi;
982 struct ice_tx_queue *txq;
983 const struct rte_memzone *tz;
985 uint16_t tx_rs_thresh, tx_free_thresh;
988 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
990 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
991 nb_desc > ICE_MAX_RING_DESC ||
992 nb_desc < ICE_MIN_RING_DESC) {
993 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
999 * The following two parameters control the setting of the RS bit on
1000 * transmit descriptors. TX descriptors will have their RS bit set
1001 * after txq->tx_rs_thresh descriptors have been used. The TX
1002 * descriptor ring will be cleaned after txq->tx_free_thresh
1003 * descriptors are used or if the number of descriptors required to
1004 * transmit a packet is greater than the number of free TX descriptors.
1006 * The following constraints must be satisfied:
1007 * - tx_rs_thresh must be greater than 0.
1008 * - tx_rs_thresh must be less than the size of the ring minus 2.
1009 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1010 * - tx_rs_thresh must be a divisor of the ring size.
1011 * - tx_free_thresh must be greater than 0.
1012 * - tx_free_thresh must be less than the size of the ring minus 3.
1013 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1015 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1016 * race condition, hence the maximum threshold constraints. When set
1017 * to zero use default values.
1019 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1020 tx_conf->tx_free_thresh :
1021 ICE_DEFAULT_TX_FREE_THRESH);
1022 /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1024 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1025 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1026 if (tx_conf->tx_rs_thresh)
1027 tx_rs_thresh = tx_conf->tx_rs_thresh;
1028 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1029 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1030 "exceed nb_desc. (tx_rs_thresh=%u "
1031 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1032 (unsigned int)tx_rs_thresh,
1033 (unsigned int)tx_free_thresh,
1034 (unsigned int)nb_desc,
1035 (int)dev->data->port_id,
1039 if (tx_rs_thresh >= (nb_desc - 2)) {
1040 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1041 "number of TX descriptors minus 2. "
1042 "(tx_rs_thresh=%u port=%d queue=%d)",
1043 (unsigned int)tx_rs_thresh,
1044 (int)dev->data->port_id,
1048 if (tx_free_thresh >= (nb_desc - 3)) {
1049 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1051 "number of TX descriptors minus 3. "
1052 "(tx_free_thresh=%u port=%d queue=%d)",
1053 (unsigned int)tx_free_thresh,
1054 (int)dev->data->port_id,
1058 if (tx_rs_thresh > tx_free_thresh) {
1059 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1060 "equal to tx_free_thresh. (tx_free_thresh=%u"
1061 " tx_rs_thresh=%u port=%d queue=%d)",
1062 (unsigned int)tx_free_thresh,
1063 (unsigned int)tx_rs_thresh,
1064 (int)dev->data->port_id,
1068 if ((nb_desc % tx_rs_thresh) != 0) {
1069 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1070 "number of TX descriptors. (tx_rs_thresh=%u"
1071 " port=%d queue=%d)",
1072 (unsigned int)tx_rs_thresh,
1073 (int)dev->data->port_id,
1077 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1078 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1079 "tx_rs_thresh is greater than 1. "
1080 "(tx_rs_thresh=%u port=%d queue=%d)",
1081 (unsigned int)tx_rs_thresh,
1082 (int)dev->data->port_id,
1087 /* Free memory if needed. */
1088 if (dev->data->tx_queues[queue_idx]) {
1089 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1090 dev->data->tx_queues[queue_idx] = NULL;
1093 /* Allocate the TX queue data structure. */
1094 txq = rte_zmalloc_socket(NULL,
1095 sizeof(struct ice_tx_queue),
1096 RTE_CACHE_LINE_SIZE,
1099 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1100 "tx queue structure");
1104 /* Allocate TX hardware ring descriptors. */
1105 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1106 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1107 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1108 ring_size, ICE_RING_BASE_ALIGN,
1111 ice_tx_queue_release(txq);
1112 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1116 txq->nb_tx_desc = nb_desc;
1117 txq->tx_rs_thresh = tx_rs_thresh;
1118 txq->tx_free_thresh = tx_free_thresh;
1119 txq->pthresh = tx_conf->tx_thresh.pthresh;
1120 txq->hthresh = tx_conf->tx_thresh.hthresh;
1121 txq->wthresh = tx_conf->tx_thresh.wthresh;
1122 txq->queue_id = queue_idx;
1124 txq->reg_idx = vsi->base_queue + queue_idx;
1125 txq->port_id = dev->data->port_id;
1126 txq->offloads = offloads;
1128 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1130 txq->tx_ring_dma = tz->iova;
1131 txq->tx_ring = tz->addr;
1133 /* Allocate software ring */
1135 rte_zmalloc_socket(NULL,
1136 sizeof(struct ice_tx_entry) * nb_desc,
1137 RTE_CACHE_LINE_SIZE,
1139 if (!txq->sw_ring) {
1140 ice_tx_queue_release(txq);
1141 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1145 ice_reset_tx_queue(txq);
1147 dev->data->tx_queues[queue_idx] = txq;
1148 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1149 ice_set_tx_function_flag(dev, txq);
1155 ice_tx_queue_release(void *txq)
1157 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1160 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1165 rte_free(q->sw_ring);
1170 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1171 struct rte_eth_rxq_info *qinfo)
1173 struct ice_rx_queue *rxq;
1175 rxq = dev->data->rx_queues[queue_id];
1177 qinfo->mp = rxq->mp;
1178 qinfo->scattered_rx = dev->data->scattered_rx;
1179 qinfo->nb_desc = rxq->nb_rx_desc;
1181 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1182 qinfo->conf.rx_drop_en = rxq->drop_en;
1183 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1187 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1188 struct rte_eth_txq_info *qinfo)
1190 struct ice_tx_queue *txq;
1192 txq = dev->data->tx_queues[queue_id];
1194 qinfo->nb_desc = txq->nb_tx_desc;
1196 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1197 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1198 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1200 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1201 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1202 qinfo->conf.offloads = txq->offloads;
1203 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
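/* Count the used (DD-set) descriptors of an Rx queue by checking the DD bit of
 * every ICE_RXQ_SCAN_INTERVAL-th descriptor starting from rx_tail.
 */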
1207 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1209 #define ICE_RXQ_SCAN_INTERVAL 4
1210 volatile union ice_rx_flex_desc *rxdp;
1211 struct ice_rx_queue *rxq;
1214 rxq = dev->data->rx_queues[rx_queue_id];
1215 rxdp = &rxq->rx_ring[rxq->rx_tail];
1216 while ((desc < rxq->nb_rx_desc) &&
1217 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1218 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1220 * Check the DD bit of one Rx descriptor in each group of 4,
1221 * to avoid checking too frequently and degrading performance
1224 desc += ICE_RXQ_SCAN_INTERVAL;
1225 rxdp += ICE_RXQ_SCAN_INTERVAL;
1226 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1227 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1228 desc - rxq->nb_rx_desc]);
1234 #define ICE_RX_FLEX_ERR0_BITS \
1235 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1236 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1237 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1238 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1239 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1240 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1242 /* Rx L3/L4 checksum */
1243 static inline uint64_t
1244 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1248 /* check if HW has decoded the packet and checksum */
1249 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1252 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1253 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1257 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1258 flags |= PKT_RX_IP_CKSUM_BAD;
1260 flags |= PKT_RX_IP_CKSUM_GOOD;
1262 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1263 flags |= PKT_RX_L4_CKSUM_BAD;
1265 flags |= PKT_RX_L4_CKSUM_GOOD;
1267 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1268 flags |= PKT_RX_EIP_CKSUM_BAD;
1274 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1276 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1277 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1278 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1280 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1281 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1282 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1287 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1288 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1289 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1290 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1291 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1292 mb->vlan_tci_outer = mb->vlan_tci;
1293 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1294 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1295 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1296 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1298 mb->vlan_tci_outer = 0;
1301 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1302 mb->vlan_tci, mb->vlan_tci_outer);
1305 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1306 #define ICE_RX_PROTO_XTR_VALID \
1307 ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
1308 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
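/* Copy the protocol extraction metadata (aux0/aux1) from the descriptor into
 * the registered mbuf dynamic field and set the ol_flag matching the RXDID.
 */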
1311 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
1312 volatile struct ice_32b_rx_flex_desc_comms *desc)
1314 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
1318 if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
1321 ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
1322 if (unlikely(!ol_flag))
1325 mb->ol_flags |= ol_flag;
1327 metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
1328 rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
1330 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
1331 metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
1333 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
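/* Fill the RSS hash, FDIR ID and protocol extraction fields of the mbuf from
 * the flexible Rx descriptor write-back.
 */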
1338 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
1339 volatile union ice_rx_flex_desc *rxdp)
1341 volatile struct ice_32b_rx_flex_desc_comms *desc =
1342 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
1345 stat_err = rte_le_to_cpu_16(desc->status_error0);
1346 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
1347 mb->ol_flags |= PKT_RX_RSS_HASH;
1348 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
1351 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1352 if (desc->flow_id != 0xFFFFFFFF) {
1353 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1354 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
1357 if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
1358 ice_rxd_to_proto_xtr(mb, desc);
1362 #define ICE_LOOK_AHEAD 8
1363 #if (ICE_LOOK_AHEAD != 8)
1364 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
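/* Scan the Rx ring in groups of ICE_LOOK_AHEAD descriptors, translate the
 * completed ones into mbufs and stage them for ice_rx_fill_from_stage().
 */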
1367 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1369 volatile union ice_rx_flex_desc *rxdp;
1370 struct ice_rx_entry *rxep;
1371 struct rte_mbuf *mb;
1374 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1375 int32_t i, j, nb_rx = 0;
1376 uint64_t pkt_flags = 0;
1377 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1379 rxdp = &rxq->rx_ring[rxq->rx_tail];
1380 rxep = &rxq->sw_ring[rxq->rx_tail];
1382 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1384 /* Make sure there is at least 1 packet to receive */
1385 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1389 * Scan LOOK_AHEAD descriptors at a time to determine which
1390 * descriptors reference packets that are ready to be received.
1392 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1393 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1394 /* Read desc statuses backwards to avoid race condition */
1395 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1396 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1400 /* Compute how many status bits were set */
1401 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1402 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1406 /* Translate descriptor info to mbuf parameters */
1407 for (j = 0; j < nb_dd; j++) {
1409 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1410 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1411 mb->data_len = pkt_len;
1412 mb->pkt_len = pkt_len;
1414 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1415 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1416 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1417 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1418 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1419 ice_rxd_to_pkt_fields(mb, &rxdp[j]);
1421 mb->ol_flags |= pkt_flags;
1424 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1425 rxq->rx_stage[i + j] = rxep[j].mbuf;
1427 if (nb_dd != ICE_LOOK_AHEAD)
1431 /* Clear software ring entries */
1432 for (i = 0; i < nb_rx; i++)
1433 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1435 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1436 "port_id=%u, queue_id=%u, nb_rx=%d",
1437 rxq->port_id, rxq->queue_id, nb_rx);
1442 static inline uint16_t
1443 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1444 struct rte_mbuf **rx_pkts,
1448 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1450 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1452 for (i = 0; i < nb_pkts; i++)
1453 rx_pkts[i] = stage[i];
1455 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1456 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
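/* Refill rx_free_thresh descriptors with mbufs allocated in bulk and advance
 * the tail register to the last refilled position.
 */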
1462 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1464 volatile union ice_rx_flex_desc *rxdp;
1465 struct ice_rx_entry *rxep;
1466 struct rte_mbuf *mb;
1467 uint16_t alloc_idx, i;
1471 /* Allocate buffers in bulk */
1472 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1473 (rxq->rx_free_thresh - 1));
1474 rxep = &rxq->sw_ring[alloc_idx];
1475 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1476 rxq->rx_free_thresh);
1477 if (unlikely(diag != 0)) {
1478 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1482 rxdp = &rxq->rx_ring[alloc_idx];
1483 for (i = 0; i < rxq->rx_free_thresh; i++) {
1484 if (likely(i < (rxq->rx_free_thresh - 1)))
1485 /* Prefetch next mbuf */
1486 rte_prefetch0(rxep[i + 1].mbuf);
1489 rte_mbuf_refcnt_set(mb, 1);
1491 mb->data_off = RTE_PKTMBUF_HEADROOM;
1493 mb->port = rxq->port_id;
1494 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1495 rxdp[i].read.hdr_addr = 0;
1496 rxdp[i].read.pkt_addr = dma_addr;
1499 /* Update Rx tail register */
1500 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1502 rxq->rx_free_trigger =
1503 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1504 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1505 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
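/* Bulk-allocation Rx worker: drain previously staged mbufs first, then scan
 * the hardware ring, stage the completed packets and refill buffers once the
 * free trigger is passed.
 */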
1510 static inline uint16_t
1511 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1513 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1515 struct rte_eth_dev *dev;
1520 if (rxq->rx_nb_avail)
1521 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1523 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1524 rxq->rx_next_avail = 0;
1525 rxq->rx_nb_avail = nb_rx;
1526 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1528 if (rxq->rx_tail > rxq->rx_free_trigger) {
1529 if (ice_rx_alloc_bufs(rxq) != 0) {
1532 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1533 dev->data->rx_mbuf_alloc_failed +=
1534 rxq->rx_free_thresh;
1535 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1536 "port_id=%u, queue_id=%u",
1537 rxq->port_id, rxq->queue_id);
1538 rxq->rx_nb_avail = 0;
1539 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1540 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1541 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1547 if (rxq->rx_tail >= rxq->nb_rx_desc)
1550 if (rxq->rx_nb_avail)
1551 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
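/* Public bulk-allocation Rx burst entry: requests larger than ICE_RX_MAX_BURST
 * are split into repeated calls to rx_recv_pkts().
 */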
1557 ice_recv_pkts_bulk_alloc(void *rx_queue,
1558 struct rte_mbuf **rx_pkts,
1565 if (unlikely(nb_pkts == 0))
1568 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1569 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1572 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1573 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1574 nb_rx = (uint16_t)(nb_rx + count);
1575 nb_pkts = (uint16_t)(nb_pkts - count);
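/* Scattered Rx burst: chain several descriptors/mbufs into one packet until
 * the EOF bit is seen, handling CRC stripping across segment boundaries.
 */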
1584 ice_recv_scattered_pkts(void *rx_queue,
1585 struct rte_mbuf **rx_pkts,
1588 struct ice_rx_queue *rxq = rx_queue;
1589 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1590 volatile union ice_rx_flex_desc *rxdp;
1591 union ice_rx_flex_desc rxd;
1592 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1593 struct ice_rx_entry *rxe;
1594 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1595 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1596 struct rte_mbuf *nmb; /* new allocated mbuf */
1597 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1598 uint16_t rx_id = rxq->rx_tail;
1600 uint16_t nb_hold = 0;
1601 uint16_t rx_packet_len;
1602 uint16_t rx_stat_err0;
1605 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1606 struct rte_eth_dev *dev;
1608 while (nb_rx < nb_pkts) {
1609 rxdp = &rx_ring[rx_id];
1610 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1612 /* Check the DD bit first */
1613 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1617 nmb = rte_mbuf_raw_alloc(rxq->mp);
1618 if (unlikely(!nmb)) {
1619 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1620 dev->data->rx_mbuf_alloc_failed++;
1623 rxd = *rxdp; /* copy descriptor in ring to temp variable */
1626 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1628 if (unlikely(rx_id == rxq->nb_rx_desc))
1631 /* Prefetch next mbuf */
1632 rte_prefetch0(sw_ring[rx_id].mbuf);
1635 * When next RX descriptor is on a cache line boundary,
1636 * prefetch the next 4 RX descriptors and next 8 pointers
1639 if ((rx_id & 0x3) == 0) {
1640 rte_prefetch0(&rx_ring[rx_id]);
1641 rte_prefetch0(&sw_ring[rx_id]);
1647 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1649 /* Set data buffer address and data length of the mbuf */
1650 rxdp->read.hdr_addr = 0;
1651 rxdp->read.pkt_addr = dma_addr;
1652 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1653 ICE_RX_FLX_DESC_PKT_LEN_M;
1654 rxm->data_len = rx_packet_len;
1655 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1658 * If this is the first buffer of the received packet, set the
1659 * pointer to the first mbuf of the packet and initialize its
1660 * context. Otherwise, update the total length and the number
1661 * of segments of the current scattered packet, and update the
1662 * pointer to the last mbuf of the current packet.
1666 first_seg->nb_segs = 1;
1667 first_seg->pkt_len = rx_packet_len;
1669 first_seg->pkt_len =
1670 (uint16_t)(first_seg->pkt_len +
1672 first_seg->nb_segs++;
1673 last_seg->next = rxm;
1677 * If this is not the last buffer of the received packet,
1678 * update the pointer to the last mbuf of the current scattered
1679 * packet and continue to parse the RX ring.
1681 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1687 * This is the last buffer of the received packet. If the CRC
1688 * is not stripped by the hardware:
1689 * - Subtract the CRC length from the total packet length.
1690 * - If the last buffer only contains the whole CRC or a part
1691 * of it, free the mbuf associated to the last buffer. If part
1692 * of the CRC is also contained in the previous mbuf, subtract
1693 * the length of that CRC part from the data length of the
1697 if (unlikely(rxq->crc_len > 0)) {
1698 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1699 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1700 rte_pktmbuf_free_seg(rxm);
1701 first_seg->nb_segs--;
1702 last_seg->data_len =
1703 (uint16_t)(last_seg->data_len -
1704 (RTE_ETHER_CRC_LEN - rx_packet_len));
1705 last_seg->next = NULL;
1707 rxm->data_len = (uint16_t)(rx_packet_len -
1711 first_seg->port = rxq->port_id;
1712 first_seg->ol_flags = 0;
1713 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1714 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1715 ice_rxd_to_vlan_tci(first_seg, &rxd);
1716 ice_rxd_to_pkt_fields(first_seg, &rxd);
1717 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1718 first_seg->ol_flags |= pkt_flags;
1719 /* Prefetch data of first segment, if configured to do so. */
1720 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1721 first_seg->data_off));
1722 rx_pkts[nb_rx++] = first_seg;
1726 /* Record index of the next RX descriptor to probe. */
1727 rxq->rx_tail = rx_id;
1728 rxq->pkt_first_seg = first_seg;
1729 rxq->pkt_last_seg = last_seg;
1732 * If the number of free RX descriptors is greater than the RX free
1733 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1734 * register. Update the RDT with the value of the last processed RX
1735 * descriptor minus 1, to guarantee that the RDT register is never
1736 * equal to the RDH register, which creates a "full" ring situation
1737 * from the hardware point of view.
1739 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1740 if (nb_hold > rxq->rx_free_thresh) {
1741 rx_id = (uint16_t)(rx_id == 0 ?
1742 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1743 /* write TAIL register */
1744 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1747 rxq->nb_rx_hold = nb_hold;
1749 /* return the number of received packets in the burst */
1754 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1756 struct ice_adapter *ad =
1757 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1758 const uint32_t *ptypes;
1760 static const uint32_t ptypes_os[] = {
1761 /* refers to ice_get_default_pkt_type() */
1763 RTE_PTYPE_L2_ETHER_TIMESYNC,
1764 RTE_PTYPE_L2_ETHER_LLDP,
1765 RTE_PTYPE_L2_ETHER_ARP,
1766 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1767 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1770 RTE_PTYPE_L4_NONFRAG,
1774 RTE_PTYPE_TUNNEL_GRENAT,
1775 RTE_PTYPE_TUNNEL_IP,
1776 RTE_PTYPE_INNER_L2_ETHER,
1777 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1778 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1779 RTE_PTYPE_INNER_L4_FRAG,
1780 RTE_PTYPE_INNER_L4_ICMP,
1781 RTE_PTYPE_INNER_L4_NONFRAG,
1782 RTE_PTYPE_INNER_L4_SCTP,
1783 RTE_PTYPE_INNER_L4_TCP,
1784 RTE_PTYPE_INNER_L4_UDP,
1788 static const uint32_t ptypes_comms[] = {
1789 /* refers to ice_get_default_pkt_type() */
1791 RTE_PTYPE_L2_ETHER_TIMESYNC,
1792 RTE_PTYPE_L2_ETHER_LLDP,
1793 RTE_PTYPE_L2_ETHER_ARP,
1794 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1795 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1798 RTE_PTYPE_L4_NONFRAG,
1802 RTE_PTYPE_TUNNEL_GRENAT,
1803 RTE_PTYPE_TUNNEL_IP,
1804 RTE_PTYPE_INNER_L2_ETHER,
1805 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1806 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1807 RTE_PTYPE_INNER_L4_FRAG,
1808 RTE_PTYPE_INNER_L4_ICMP,
1809 RTE_PTYPE_INNER_L4_NONFRAG,
1810 RTE_PTYPE_INNER_L4_SCTP,
1811 RTE_PTYPE_INNER_L4_TCP,
1812 RTE_PTYPE_INNER_L4_UDP,
1813 RTE_PTYPE_TUNNEL_GTPC,
1814 RTE_PTYPE_TUNNEL_GTPU,
1815 RTE_PTYPE_L2_ETHER_PPPOE,
1819 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1820 ptypes = ptypes_comms;
1824 if (dev->rx_pkt_burst == ice_recv_pkts ||
1825 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1826 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1830 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1831 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1832 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1833 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1841 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1843 volatile union ice_rx_flex_desc *rxdp;
1844 struct ice_rx_queue *rxq = rx_queue;
1847 if (unlikely(offset >= rxq->nb_rx_desc))
1850 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1851 return RTE_ETH_RX_DESC_UNAVAIL;
1853 desc = rxq->rx_tail + offset;
1854 if (desc >= rxq->nb_rx_desc)
1855 desc -= rxq->nb_rx_desc;
1857 rxdp = &rxq->rx_ring[desc];
1858 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1859 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1860 return RTE_ETH_RX_DESC_DONE;
1862 return RTE_ETH_RX_DESC_AVAIL;
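/* Report whether the Tx descriptor at the given offset is done: round the
 * offset up to the next descriptor carrying the RS bit and test its DTYPE
 * field for DESC_DONE.
 */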
1866 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1868 struct ice_tx_queue *txq = tx_queue;
1869 volatile uint64_t *status;
1870 uint64_t mask, expect;
1873 if (unlikely(offset >= txq->nb_tx_desc))
1876 desc = txq->tx_tail + offset;
1877 /* go to next desc that has the RS bit */
1878 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1880 if (desc >= txq->nb_tx_desc) {
1881 desc -= txq->nb_tx_desc;
1882 if (desc >= txq->nb_tx_desc)
1883 desc -= txq->nb_tx_desc;
1886 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1887 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1888 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1889 ICE_TXD_QW1_DTYPE_S);
1890 if ((*status & mask) == expect)
1891 return RTE_ETH_TX_DESC_DONE;
1893 return RTE_ETH_TX_DESC_FULL;
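/* Release all Rx and Tx queues of the port, including their DMA memzones. */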
1897 ice_free_queues(struct rte_eth_dev *dev)
1901 PMD_INIT_FUNC_TRACE();
1903 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1904 if (!dev->data->rx_queues[i])
1906 ice_rx_queue_release(dev->data->rx_queues[i]);
1907 dev->data->rx_queues[i] = NULL;
1908 rte_eth_dma_zone_free(dev, "rx_ring", i);
1910 dev->data->nb_rx_queues = 0;
1912 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1913 if (!dev->data->tx_queues[i])
1915 ice_tx_queue_release(dev->data->tx_queues[i]);
1916 dev->data->tx_queues[i] = NULL;
1917 rte_eth_dma_zone_free(dev, "tx_ring", i);
1919 dev->data->nb_tx_queues = 0;
1922 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
1923 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
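/* Allocate the dedicated flow director Tx queue and its descriptor ring
 * (ICE_FDIR_NUM_TX_DESC descriptors, no software ring).
 */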
1926 ice_fdir_setup_tx_resources(struct ice_pf *pf)
1928 struct ice_tx_queue *txq;
1929 const struct rte_memzone *tz = NULL;
1931 struct rte_eth_dev *dev;
1934 PMD_DRV_LOG(ERR, "PF is not available");
1938 dev = pf->adapter->eth_dev;
1940 /* Allocate the TX queue data structure. */
1941 txq = rte_zmalloc_socket("ice fdir tx queue",
1942 sizeof(struct ice_tx_queue),
1943 RTE_CACHE_LINE_SIZE,
1946 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
1947 "tx queue structure.");
1951 /* Allocate TX hardware ring descriptors. */
1952 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
1953 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1955 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
1956 ICE_FDIR_QUEUE_ID, ring_size,
1957 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
1959 ice_tx_queue_release(txq);
1960 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
1964 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
1965 txq->queue_id = ICE_FDIR_QUEUE_ID;
1966 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
1967 txq->vsi = pf->fdir.fdir_vsi;
1969 txq->tx_ring_dma = tz->iova;
1970 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
1972 * No need to allocate a software ring or reset the FDIR
1973 * program queue; just mark the queue as configured.
1978 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
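/* Allocate the dedicated flow director Rx queue and its descriptor ring
 * (ICE_FDIR_NUM_RX_DESC descriptors, no software ring).
 */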
1984 ice_fdir_setup_rx_resources(struct ice_pf *pf)
1986 struct ice_rx_queue *rxq;
1987 const struct rte_memzone *rz = NULL;
1989 struct rte_eth_dev *dev;
1992 PMD_DRV_LOG(ERR, "PF is not available");
1996 dev = pf->adapter->eth_dev;
1998 /* Allocate the RX queue data structure. */
1999 rxq = rte_zmalloc_socket("ice fdir rx queue",
2000 sizeof(struct ice_rx_queue),
2001 RTE_CACHE_LINE_SIZE,
2004 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2005 "rx queue structure.");
2009 /* Allocate RX hardware ring descriptors. */
2010 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2011 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2013 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2014 ICE_FDIR_QUEUE_ID, ring_size,
2015 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2017 ice_rx_queue_release(rxq);
2018 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2022 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2023 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2024 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2025 rxq->vsi = pf->fdir.fdir_vsi;
2027 rxq->rx_ring_dma = rz->iova;
2028 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2029 sizeof(union ice_32byte_rx_desc));
2030 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2033 * No need to allocate a software ring or reset the FDIR
2034 * Rx queue; just mark the queue as configured.
2039 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
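/* Default (single-mbuf) Rx burst: one descriptor per packet, no scattering;
 * each completed descriptor is swapped with a freshly allocated mbuf.
 */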
2045 ice_recv_pkts(void *rx_queue,
2046 struct rte_mbuf **rx_pkts,
2049 struct ice_rx_queue *rxq = rx_queue;
2050 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2051 volatile union ice_rx_flex_desc *rxdp;
2052 union ice_rx_flex_desc rxd;
2053 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2054 struct ice_rx_entry *rxe;
2055 struct rte_mbuf *nmb; /* new allocated mbuf */
2056 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2057 uint16_t rx_id = rxq->rx_tail;
2059 uint16_t nb_hold = 0;
2060 uint16_t rx_packet_len;
2061 uint16_t rx_stat_err0;
2064 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2065 struct rte_eth_dev *dev;
2067 while (nb_rx < nb_pkts) {
2068 rxdp = &rx_ring[rx_id];
2069 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2071 /* Check the DD bit first */
2072 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2076 nmb = rte_mbuf_raw_alloc(rxq->mp);
2077 if (unlikely(!nmb)) {
2078 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2079 dev->data->rx_mbuf_alloc_failed++;
2082 rxd = *rxdp; /* copy descriptor in ring to temp variable */
2085 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2087 if (unlikely(rx_id == rxq->nb_rx_desc))
2092 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2095 * Fill the read format of the descriptor with the physical address
2096 * of the newly allocated mbuf: nmb
2098 rxdp->read.hdr_addr = 0;
2099 rxdp->read.pkt_addr = dma_addr;
2101 /* calculate rx_packet_len of the received pkt */
2102 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2103 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2105 /* fill old mbuf with received descriptor: rxd */
2106 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2107 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2110 rxm->pkt_len = rx_packet_len;
2111 rxm->data_len = rx_packet_len;
2112 rxm->port = rxq->port_id;
2113 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2114 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2115 ice_rxd_to_vlan_tci(rxm, &rxd);
2116 ice_rxd_to_pkt_fields(rxm, &rxd);
2117 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2118 rxm->ol_flags |= pkt_flags;
2119 /* copy old mbuf to rx_pkts */
2120 rx_pkts[nb_rx++] = rxm;
2122 rxq->rx_tail = rx_id;
2124 * If the number of free RX descriptors is greater than the RX free
2125 * threshold of the queue, advance the receive tail register of queue.
2126 * Update that register with the value of the last processed RX
2127 * descriptor minus 1.
2129 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2130 if (nb_hold > rxq->rx_free_thresh) {
2131 rx_id = (uint16_t)(rx_id == 0 ?
2132 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2133 /* write TAIL register */
2134 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
2137 rxq->nb_rx_hold = nb_hold;
2139 /* return the number of received packets in the burst */
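/* Build the tunneling fields of the Tx context descriptor (outer IP type and
 * length, tunnel type, tunnel header length) from the mbuf offload flags and
 * header lengths.
 */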
2144 ice_parse_tunneling_params(uint64_t ol_flags,
2145 union ice_tx_offload tx_offload,
2146 uint32_t *cd_tunneling)
2148 /* EIPT: External (outer) IP header type */
2149 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2150 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2151 else if (ol_flags & PKT_TX_OUTER_IPV4)
2152 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2153 else if (ol_flags & PKT_TX_OUTER_IPV6)
2154 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2156 /* EIPLEN: External (outer) IP header length, in DWords */
2157 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2158 ICE_TXD_CTX_QW0_EIPLEN_S;
2160 /* L4TUNT: L4 Tunneling Type */
2161 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2162 case PKT_TX_TUNNEL_IPIP:
2163 /* for non UDP / GRE tunneling, set to 00b */
2165 case PKT_TX_TUNNEL_VXLAN:
2166 case PKT_TX_TUNNEL_GTP:
2167 case PKT_TX_TUNNEL_GENEVE:
2168 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2170 case PKT_TX_TUNNEL_GRE:
2171 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2174 PMD_TX_LOG(ERR, "Tunnel type not supported");
2178 /* L4TUNLEN: L4 Tunneling Length, in Words
2180 * We depend on the app to set rte_mbuf.l2_len correctly.
2181 * For IP in GRE it should be set to the length of the GRE
2183 * For MAC in GRE or MAC in UDP it should be set to the length
2184 * of the GRE or UDP headers plus the inner MAC up to and including
2185 * its last Ethertype.
2186 * If MPLS labels exist, it should include them as well.
2188 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2189 ICE_TXD_CTX_QW0_NATLEN_S;
2191 if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2192 (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2193 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2194 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
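/* Fill the Tx descriptor command and offset fields to enable IP and L4
 * checksum offloads according to the mbuf offload flags and header lengths.
 */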
2198 ice_txd_enable_checksum(uint64_t ol_flags,
2200 uint32_t *td_offset,
2201 union ice_tx_offload tx_offload)
2204 if (ol_flags & PKT_TX_TUNNEL_MASK)
2205 *td_offset |= (tx_offload.outer_l2_len >> 1)
2206 << ICE_TX_DESC_LEN_MACLEN_S;
2208 *td_offset |= (tx_offload.l2_len >> 1)
2209 << ICE_TX_DESC_LEN_MACLEN_S;
2211 /* Enable L3 checksum offloads */
2212 if (ol_flags & PKT_TX_IP_CKSUM) {
2213 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2214 *td_offset |= (tx_offload.l3_len >> 2) <<
2215 ICE_TX_DESC_LEN_IPLEN_S;
2216 } else if (ol_flags & PKT_TX_IPV4) {
2217 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2218 *td_offset |= (tx_offload.l3_len >> 2) <<
2219 ICE_TX_DESC_LEN_IPLEN_S;
2220 } else if (ol_flags & PKT_TX_IPV6) {
2221 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2222 *td_offset |= (tx_offload.l3_len >> 2) <<
2223 ICE_TX_DESC_LEN_IPLEN_S;
2226 if (ol_flags & PKT_TX_TCP_SEG) {
2227 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2228 *td_offset |= (tx_offload.l4_len >> 2) <<
2229 ICE_TX_DESC_LEN_L4_LEN_S;
2233 /* Enable L4 checksum offloads */
2234 switch (ol_flags & PKT_TX_L4_MASK) {
2235 case PKT_TX_TCP_CKSUM:
2236 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2237 *td_offset |= (tx_offload.l4_len >> 2) <<
2238 ICE_TX_DESC_LEN_L4_LEN_S;
2240 case PKT_TX_SCTP_CKSUM:
2241 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2242 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2243 ICE_TX_DESC_LEN_L4_LEN_S;
2245 case PKT_TX_UDP_CKSUM:
2246 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2247 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2248 ICE_TX_DESC_LEN_L4_LEN_S;
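/*
 * Illustrative sketch (not part of the driver): the mbuf fields an
 * application is expected to fill so that ice_txd_enable_checksum() can
 * derive the MACLEN/IPLEN/L4LEN offsets for a plain (non-tunneled)
 * IPv4/UDP packet.  Hypothetical helper; assumes the standard rte_ether,
 * rte_ip and rte_udp headers are available.
 */
static __rte_unused void
example_request_ipv4_udp_cksum(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_udp_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
}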
2256 ice_xmit_cleanup(struct ice_tx_queue *txq)
2258 struct ice_tx_entry *sw_ring = txq->sw_ring;
2259 volatile struct ice_tx_desc *txd = txq->tx_ring;
2260 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2261 uint16_t nb_tx_desc = txq->nb_tx_desc;
2262 uint16_t desc_to_clean_to;
2263 uint16_t nb_tx_to_clean;
2265 /* Determine the last descriptor needing to be cleaned */
2266 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2267 if (desc_to_clean_to >= nb_tx_desc)
2268 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2270 /* Check to make sure the last descriptor to clean is done */
2271 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2272 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2273 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2274 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2275 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2277 txq->port_id, txq->queue_id,
2278 txd[desc_to_clean_to].cmd_type_offset_bsz);
2279 /* Failed to clean any descriptors */
2283 /* Figure out how many descriptors will be cleaned */
2284 if (last_desc_cleaned > desc_to_clean_to)
2285 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2288 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2291 /* The last descriptor to clean is done, so that means all the
2292 * descriptors from the last descriptor that was cleaned
2293 * up to the last descriptor with the RS bit set
2294 * are done. Only reset the threshold descriptor.
2296 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2298 /* Update the txq to reflect the last descriptor that was cleaned */
2299 txq->last_desc_cleaned = desc_to_clean_to;
2300 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2305 /* Construct the tx flags */
2306 static inline uint64_t
2307 ice_build_ctob(uint32_t td_cmd,
2312 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2313 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2314 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2315 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2316 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2319 /* Check if the context descriptor is needed for TX offloading */
2320 static inline uint16_t
2321 ice_calc_context_desc(uint64_t flags)
2323 static uint64_t mask = PKT_TX_TCP_SEG |
2325 PKT_TX_OUTER_IP_CKSUM |
2328 return (flags & mask) ? 1 : 0;
2331 /* set ice TSO context descriptor */
2332 static inline uint64_t
2333 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2335 uint64_t ctx_desc = 0;
2336 uint32_t cd_cmd, hdr_len, cd_tso_len;
2338 if (!tx_offload.l4_len) {
2339 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2343 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2344 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2345 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2347 cd_cmd = ICE_TX_CTX_DESC_TSO;
2348 cd_tso_len = mbuf->pkt_len - hdr_len;
2349 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2350 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2351 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
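/*
 * Illustrative example (not part of the driver): for a non-tunneled TCP
 * packet with l2_len = 14, l3_len = 20, l4_len = 20 and pkt_len = 9054,
 * hdr_len is 54, so cd_tso_len = 9000 bytes of payload; with
 * tso_segsz = 1460 the hardware emits ceil(9000 / 1460) = 7 segments.
 */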
2356 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2357 #define ICE_MAX_DATA_PER_TXD \
2358 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2359 /* Calculate the number of TX descriptors needed for each pkt */
2360 static inline uint16_t
2361 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2363 struct rte_mbuf *txd = tx_pkt;
2366 while (txd != NULL) {
2367 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
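/*
 * Illustrative example (not part of the driver): a TSO mbuf chain of two
 * segments with data_len 20000 and 9000 needs
 * DIV_ROUND_UP(20000, 16383) + DIV_ROUND_UP(9000, 16383) = 2 + 1 = 3
 * data descriptors, since no single descriptor may carry more than
 * ICE_MAX_DATA_PER_TXD (16383) bytes.
 */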
2374 /* Calculate TCP header length for PKT_TX_TCP_CKSUM if not provided */
2375 static inline uint16_t
2376 ice_calc_pkt_tcp_hdr(struct rte_mbuf *tx_pkt, union ice_tx_offload tx_offload)
2378 uint16_t tcpoff = tx_offload.l2_len + tx_offload.l3_len;
2379 const struct rte_tcp_hdr *tcp_hdr;
2380 struct rte_tcp_hdr _tcp_hdr;
2382 if (tcpoff + sizeof(struct rte_tcp_hdr) < tx_pkt->data_len) {
2383 tcp_hdr = rte_pktmbuf_mtod_offset(tx_pkt, struct rte_tcp_hdr *,
2386 return (tcp_hdr->data_off & 0xf0) >> 2;
2389 tcp_hdr = rte_pktmbuf_read(tx_pkt, tcpoff, sizeof(_tcp_hdr), &_tcp_hdr);
2391 return (tcp_hdr->data_off & 0xf0) >> 2;
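/*
 * Illustrative example (not part of the driver): the upper nibble of
 * tcp_hdr->data_off holds the header length in 32-bit words, so
 * data_off = 0x50 yields (0x50 & 0xf0) >> 2 = 20 bytes (no TCP options)
 * and data_off = 0x80 yields 32 bytes (12 bytes of options).
 */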
2397 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2399 struct ice_tx_queue *txq;
2400 volatile struct ice_tx_desc *tx_ring;
2401 volatile struct ice_tx_desc *txd;
2402 struct ice_tx_entry *sw_ring;
2403 struct ice_tx_entry *txe, *txn;
2404 struct rte_mbuf *tx_pkt;
2405 struct rte_mbuf *m_seg;
2406 uint32_t cd_tunneling_params;
2411 uint32_t td_cmd = 0;
2412 uint32_t td_offset = 0;
2413 uint32_t td_tag = 0;
2416 uint64_t buf_dma_addr;
2418 union ice_tx_offload tx_offload = {0};
2421 sw_ring = txq->sw_ring;
2422 tx_ring = txq->tx_ring;
2423 tx_id = txq->tx_tail;
2424 txe = &sw_ring[tx_id];
2426 /* Check if the descriptor ring needs to be cleaned. */
2427 if (txq->nb_tx_free < txq->tx_free_thresh)
2428 (void)ice_xmit_cleanup(txq);
2430 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2431 tx_pkt = *tx_pkts++;
2434 ol_flags = tx_pkt->ol_flags;
2435 tx_offload.l2_len = tx_pkt->l2_len;
2436 tx_offload.l3_len = tx_pkt->l3_len;
2437 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2438 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2439 tx_offload.l4_len = tx_pkt->l4_len;
2440 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2441 /* Calculate the number of context descriptors needed. */
2442 nb_ctx = ice_calc_context_desc(ol_flags);
2444 /* The number of descriptors that must be allocated for
2445 * a packet equals the number of segments in that packet,
2446 * plus one context descriptor if one is needed.
2447 * When TSO is enabled, recalculate the number of descriptors needed,
2448 * in case an mbuf's data size exceeds the per-descriptor maximum the hardware allows
2451 if (ol_flags & PKT_TX_TCP_SEG)
2452 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2455 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2456 tx_last = (uint16_t)(tx_id + nb_used - 1);
2459 if (tx_last >= txq->nb_tx_desc)
2460 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2462 if (nb_used > txq->nb_tx_free) {
2463 if (ice_xmit_cleanup(txq) != 0) {
2468 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2469 while (nb_used > txq->nb_tx_free) {
2470 if (ice_xmit_cleanup(txq) != 0) {
2479 /* Descriptor based VLAN insertion */
2480 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2481 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2482 td_tag = tx_pkt->vlan_tci;
2485 /* Fill in tunneling parameters if necessary */
2486 cd_tunneling_params = 0;
2487 if (ol_flags & PKT_TX_TUNNEL_MASK)
2488 ice_parse_tunneling_params(ol_flags, tx_offload,
2489 &cd_tunneling_params);
2491 /* Enable checksum offloading */
2492 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
2493 if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM &&
2496 ice_calc_pkt_tcp_hdr(tx_pkt, tx_offload);
2498 ice_txd_enable_checksum(ol_flags, &td_cmd,
2499 &td_offset, tx_offload);
2503 /* Setup TX context descriptor if required */
2504 volatile struct ice_tx_ctx_desc *ctx_txd =
2505 (volatile struct ice_tx_ctx_desc *)
2507 uint16_t cd_l2tag2 = 0;
2508 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2510 txn = &sw_ring[txe->next_id];
2511 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2513 rte_pktmbuf_free_seg(txe->mbuf);
2517 if (ol_flags & PKT_TX_TCP_SEG)
2518 cd_type_cmd_tso_mss |=
2519 ice_set_tso_ctx(tx_pkt, tx_offload);
2521 ctx_txd->tunneling_params =
2522 rte_cpu_to_le_32(cd_tunneling_params);
2524 /* TX context descriptor based double VLAN insert */
2525 if (ol_flags & PKT_TX_QINQ) {
2526 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2527 cd_type_cmd_tso_mss |=
2528 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2529 ICE_TXD_CTX_QW1_CMD_S);
2531 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2533 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2535 txe->last_id = tx_last;
2536 tx_id = txe->next_id;
2542 txd = &tx_ring[tx_id];
2543 txn = &sw_ring[txe->next_id];
2546 rte_pktmbuf_free_seg(txe->mbuf);
2549 /* Setup TX Descriptor */
2550 slen = m_seg->data_len;
2551 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2553 while ((ol_flags & PKT_TX_TCP_SEG) &&
2554 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2555 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2556 txd->cmd_type_offset_bsz =
2557 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2558 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2559 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2560 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2561 ICE_TXD_QW1_TX_BUF_SZ_S) |
2562 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2564 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2565 slen -= ICE_MAX_DATA_PER_TXD;
2567 txe->last_id = tx_last;
2568 tx_id = txe->next_id;
2570 txd = &tx_ring[tx_id];
2571 txn = &sw_ring[txe->next_id];
2574 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2575 txd->cmd_type_offset_bsz =
2576 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2577 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2578 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2579 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2580 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2582 txe->last_id = tx_last;
2583 tx_id = txe->next_id;
2585 m_seg = m_seg->next;
2588 /* fill the last descriptor with End of Packet (EOP) bit */
2589 td_cmd |= ICE_TX_DESC_CMD_EOP;
2590 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2591 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2593 /* set RS bit on the last descriptor of one packet */
2594 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2595 PMD_TX_FREE_LOG(DEBUG,
2596 "Setting RS bit on TXD id="
2597 "%4u (port=%d queue=%d)",
2598 tx_last, txq->port_id, txq->queue_id);
2600 td_cmd |= ICE_TX_DESC_CMD_RS;
2602 /* Update txq RS bit counters */
2603 txq->nb_tx_used = 0;
2605 txd->cmd_type_offset_bsz |=
2606 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2610 /* update Tail register */
2611 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2612 txq->tx_tail = tx_id;
2617 static __rte_always_inline int
2618 ice_tx_free_bufs(struct ice_tx_queue *txq)
2620 struct ice_tx_entry *txep;
2623 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2624 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2625 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2628 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2630 for (i = 0; i < txq->tx_rs_thresh; i++)
2631 rte_prefetch0((txep + i)->mbuf);
2633 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2634 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2635 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2639 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2640 rte_pktmbuf_free_seg(txep->mbuf);
2645 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2646 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2647 if (txq->tx_next_dd >= txq->nb_tx_desc)
2648 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2650 return txq->tx_rs_thresh;
2654 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2657 struct ice_tx_entry *swr_ring = txq->sw_ring;
2658 uint16_t i, tx_last, tx_id;
2659 uint16_t nb_tx_free_last;
2660 uint16_t nb_tx_to_clean;
2663 /* Start free mbuf from the next of tx_tail */
2664 tx_last = txq->tx_tail;
2665 tx_id = swr_ring[tx_last].next_id;
2667 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2670 nb_tx_to_clean = txq->nb_tx_free;
2671 nb_tx_free_last = txq->nb_tx_free;
2673 free_cnt = txq->nb_tx_desc;
2675 /* Loop through swr_ring to count the number of
2676 * freeable mbufs and packets.
2678 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2679 for (i = 0; i < nb_tx_to_clean &&
2680 pkt_cnt < free_cnt &&
2681 tx_id != tx_last; i++) {
2682 if (swr_ring[tx_id].mbuf != NULL) {
2683 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2684 swr_ring[tx_id].mbuf = NULL;
2687 * last segment in the packet,
2688 * increment packet count
2690 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2693 tx_id = swr_ring[tx_id].next_id;
2696 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2697 txq->nb_tx_free || tx_id == tx_last)
2700 if (pkt_cnt < free_cnt) {
2701 if (ice_xmit_cleanup(txq))
2704 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2705 nb_tx_free_last = txq->nb_tx_free;
2709 return (int)pkt_cnt;
2714 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2715 uint32_t free_cnt __rte_unused)
2722 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2727 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2728 free_cnt = txq->nb_tx_desc;
2730 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2732 for (i = 0; i < cnt; i += n) {
2733 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2736 n = ice_tx_free_bufs(txq);
2746 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2748 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2749 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2750 struct ice_adapter *ad =
2751 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2754 if (ad->tx_vec_allowed)
2755 return ice_tx_done_cleanup_vec(q, free_cnt);
2757 if (ad->tx_simple_allowed)
2758 return ice_tx_done_cleanup_simple(q, free_cnt);
2760 return ice_tx_done_cleanup_full(q, free_cnt);
2763 /* Populate 4 descriptors with data from 4 mbufs */
2765 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2770 for (i = 0; i < 4; i++, txdp++, pkts++) {
2771 dma_addr = rte_mbuf_data_iova(*pkts);
2772 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2773 txdp->cmd_type_offset_bsz =
2774 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2775 (*pkts)->data_len, 0);
2779 /* Populate 1 descriptor with data from 1 mbuf */
2781 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2785 dma_addr = rte_mbuf_data_iova(*pkts);
2786 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2787 txdp->cmd_type_offset_bsz =
2788 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2789 (*pkts)->data_len, 0);
2793 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2796 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2797 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2798 const int N_PER_LOOP = 4;
2799 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2800 int mainpart, leftover;
2804 * Process most of the packets in chunks of N pkts. Any
2805 * leftover packets will get processed one at a time.
2807 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2808 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2809 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2810 /* Copy N mbuf pointers to the S/W ring */
2811 for (j = 0; j < N_PER_LOOP; ++j)
2812 (txep + i + j)->mbuf = *(pkts + i + j);
2813 tx4(txdp + i, pkts + i);
2816 if (unlikely(leftover > 0)) {
2817 for (i = 0; i < leftover; ++i) {
2818 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2819 tx1(txdp + mainpart + i, pkts + mainpart + i);
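/*
 * Illustrative example (not part of the driver): with nb_pkts = 11,
 * mainpart = 11 & ~3 = 8 (two tx4() iterations) and leftover = 11 & 3 = 3
 * (three tx1() calls), so all 11 descriptors are filled.
 */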
2824 static inline uint16_t
2825 tx_xmit_pkts(struct ice_tx_queue *txq,
2826 struct rte_mbuf **tx_pkts,
2829 volatile struct ice_tx_desc *txr = txq->tx_ring;
2833 * Begin scanning the H/W ring for done descriptors when the number
2834 * of available descriptors drops below tx_free_thresh. For each done
2835 * descriptor, free the associated buffer.
2837 if (txq->nb_tx_free < txq->tx_free_thresh)
2838 ice_tx_free_bufs(txq);
2840 /* Use available descriptors only */
2841 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2842 if (unlikely(!nb_pkts))
2845 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2846 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2847 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2848 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2849 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2850 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2852 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2856 /* Fill hardware descriptor ring with mbuf data */
2857 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2858 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2860 /* Determine whether the RS bit needs to be set */
2861 if (txq->tx_tail > txq->tx_next_rs) {
2862 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2863 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2866 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2867 if (txq->tx_next_rs >= txq->nb_tx_desc)
2868 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2871 if (txq->tx_tail >= txq->nb_tx_desc)
2874 /* Update the tx tail register */
2875 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2881 ice_xmit_pkts_simple(void *tx_queue,
2882 struct rte_mbuf **tx_pkts,
2887 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2888 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2892 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2895 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2896 &tx_pkts[nb_tx], num);
2897 nb_tx = (uint16_t)(nb_tx + ret);
2898 nb_pkts = (uint16_t)(nb_pkts - ret);
2907 ice_set_rx_function(struct rte_eth_dev *dev)
2909 PMD_INIT_FUNC_TRACE();
2910 struct ice_adapter *ad =
2911 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2913 struct ice_rx_queue *rxq;
2915 bool use_avx2 = false;
2917 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2918 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
2919 ad->rx_vec_allowed = true;
2920 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2921 rxq = dev->data->rx_queues[i];
2922 if (rxq && ice_rxq_vec_setup(rxq)) {
2923 ad->rx_vec_allowed = false;
2928 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2929 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2933 ad->rx_vec_allowed = false;
2937 if (ad->rx_vec_allowed) {
2938 if (dev->data->scattered_rx) {
2940 "Using %sVector Scattered Rx (port %d).",
2941 use_avx2 ? "avx2 " : "",
2942 dev->data->port_id);
2943 dev->rx_pkt_burst = use_avx2 ?
2944 ice_recv_scattered_pkts_vec_avx2 :
2945 ice_recv_scattered_pkts_vec;
2947 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2948 use_avx2 ? "avx2 " : "",
2949 dev->data->port_id);
2950 dev->rx_pkt_burst = use_avx2 ?
2951 ice_recv_pkts_vec_avx2 :
2959 if (dev->data->scattered_rx) {
2960 /* Set the non-LRO scattered function */
2962 "Using a Scattered function on port %d.",
2963 dev->data->port_id);
2964 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2965 } else if (ad->rx_bulk_alloc_allowed) {
2967 "Rx Burst Bulk Alloc Preconditions are "
2968 "satisfied. Rx Burst Bulk Alloc function "
2969 "will be used on port %d.",
2970 dev->data->port_id);
2971 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2974 "Rx Burst Bulk Alloc Preconditions are not "
2975 "satisfied, Normal Rx will be used on port %d.",
2976 dev->data->port_id);
2977 dev->rx_pkt_burst = ice_recv_pkts;
2981 static const struct {
2982 eth_rx_burst_t pkt_burst;
2984 } ice_rx_burst_infos[] = {
2985 { ice_recv_scattered_pkts, "Scalar Scattered" },
2986 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
2987 { ice_recv_pkts, "Scalar" },
2989 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
2990 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
2991 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
2992 { ice_recv_pkts_vec, "Vector SSE" },
2997 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2998 struct rte_eth_burst_mode *mode)
3000 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3004 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3005 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3006 snprintf(mode->info, sizeof(mode->info), "%s",
3007 ice_rx_burst_infos[i].info);
3017 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3019 struct ice_adapter *ad =
3020 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3022 /* Use a simple Tx queue if possible (only fast free is allowed) */
3023 ad->tx_simple_allowed =
3025 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3026 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3028 if (ad->tx_simple_allowed)
3029 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3033 "Simple Tx can NOT be enabled on Tx queue %u.",
3037 /*********************************************************************
3041 **********************************************************************/
3042 /* TSO MSS and frame size limits */
3043 #define ICE_MIN_TSO_MSS 64
3044 #define ICE_MAX_TSO_MSS 9728
3045 #define ICE_MAX_TSO_FRAME_SIZE 262144
3047 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3054 for (i = 0; i < nb_pkts; i++) {
3056 ol_flags = m->ol_flags;
3058 if (ol_flags & PKT_TX_TCP_SEG &&
3059 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3060 m->tso_segsz > ICE_MAX_TSO_MSS ||
3061 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3063 * MSS values outside this range are considered malicious
3069 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3070 ret = rte_validate_tx_offload(m);
3076 ret = rte_net_intel_cksum_prepare(m);
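/*
 * Illustrative sketch (not part of the driver): when the full Tx path is in
 * use, dev->tx_pkt_prepare points at ice_prep_pkts(), so an application
 * should call rte_eth_tx_prepare() before rte_eth_tx_burst() to have TSO
 * limits validated and pseudo-header checksums filled in.  Hypothetical
 * helper using only the standard ethdev API.
 */
static __rte_unused uint16_t
example_prepare_and_send(uint16_t port_id, uint16_t queue_id,
			 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_ready = rte_eth_tx_prepare(port_id, queue_id,
					       pkts, nb_pkts);

	/* Mbufs past nb_ready failed preparation; send only the valid ones. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_ready);
}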
3086 ice_set_tx_function(struct rte_eth_dev *dev)
3088 struct ice_adapter *ad =
3089 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3091 struct ice_tx_queue *txq;
3093 bool use_avx2 = false;
3095 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3096 if (!ice_tx_vec_dev_check(dev)) {
3097 ad->tx_vec_allowed = true;
3098 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3099 txq = dev->data->tx_queues[i];
3100 if (txq && ice_txq_vec_setup(txq)) {
3101 ad->tx_vec_allowed = false;
3106 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3107 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
3111 ad->tx_vec_allowed = false;
3115 if (ad->tx_vec_allowed) {
3116 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3117 use_avx2 ? "avx2 " : "",
3118 dev->data->port_id);
3119 dev->tx_pkt_burst = use_avx2 ?
3120 ice_xmit_pkts_vec_avx2 :
3122 dev->tx_pkt_prepare = NULL;
3128 if (ad->tx_simple_allowed) {
3129 PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
3130 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3131 dev->tx_pkt_prepare = NULL;
3133 PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
3134 dev->tx_pkt_burst = ice_xmit_pkts;
3135 dev->tx_pkt_prepare = ice_prep_pkts;
3139 static const struct {
3140 eth_tx_burst_t pkt_burst;
3142 } ice_tx_burst_infos[] = {
3143 { ice_xmit_pkts_simple, "Scalar Simple" },
3144 { ice_xmit_pkts, "Scalar" },
3146 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3147 { ice_xmit_pkts_vec, "Vector SSE" },
3152 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3153 struct rte_eth_burst_mode *mode)
3155 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3159 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3160 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3161 snprintf(mode->info, sizeof(mode->info), "%s",
3162 ice_tx_burst_infos[i].info);
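/*
 * Illustrative sketch (not part of the driver): how an application could
 * query which Rx/Tx burst implementation was selected, using the standard
 * ethdev burst-mode API (assumes <rte_ethdev.h> and <stdio.h> are
 * available).  Hypothetical helper; queue 0 is used as an example.
 */
static __rte_unused void
example_print_burst_modes(uint16_t port_id)
{
	struct rte_eth_burst_mode rx_mode, tx_mode;

	if (rte_eth_rx_burst_mode_get(port_id, 0, &rx_mode) == 0)
		printf("port %u Rx burst mode: %s\n", port_id, rx_mode.info);
	if (rte_eth_tx_burst_mode_get(port_id, 0, &tx_mode) == 0)
		printf("port %u Tx burst mode: %s\n", port_id, tx_mode.info);
}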
3171 /* Refer to the hardware datasheet for the detailed meaning of each value.
3173 * @note: update ice_dev_supported_ptypes_get() if anything changes here.
3175 static inline uint32_t
3176 ice_get_default_pkt_type(uint16_t ptype)
3178 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3179 __rte_cache_aligned = {
3182 [1] = RTE_PTYPE_L2_ETHER,
3183 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3184 /* [3] - [5] reserved */
3185 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3186 /* [7] - [10] reserved */
3187 [11] = RTE_PTYPE_L2_ETHER_ARP,
3188 /* [12] - [21] reserved */
3190 /* Non tunneled IPv4 */
3191 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3193 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3194 RTE_PTYPE_L4_NONFRAG,
3195 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3198 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3200 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3202 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3206 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3207 RTE_PTYPE_TUNNEL_IP |
3208 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3209 RTE_PTYPE_INNER_L4_FRAG,
3210 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3211 RTE_PTYPE_TUNNEL_IP |
3212 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3213 RTE_PTYPE_INNER_L4_NONFRAG,
3214 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3215 RTE_PTYPE_TUNNEL_IP |
3216 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3217 RTE_PTYPE_INNER_L4_UDP,
3219 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3220 RTE_PTYPE_TUNNEL_IP |
3221 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3222 RTE_PTYPE_INNER_L4_TCP,
3223 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3224 RTE_PTYPE_TUNNEL_IP |
3225 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3226 RTE_PTYPE_INNER_L4_SCTP,
3227 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3228 RTE_PTYPE_TUNNEL_IP |
3229 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3230 RTE_PTYPE_INNER_L4_ICMP,
3233 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3234 RTE_PTYPE_TUNNEL_IP |
3235 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3236 RTE_PTYPE_INNER_L4_FRAG,
3237 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3238 RTE_PTYPE_TUNNEL_IP |
3239 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3240 RTE_PTYPE_INNER_L4_NONFRAG,
3241 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3242 RTE_PTYPE_TUNNEL_IP |
3243 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3244 RTE_PTYPE_INNER_L4_UDP,
3246 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3247 RTE_PTYPE_TUNNEL_IP |
3248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3249 RTE_PTYPE_INNER_L4_TCP,
3250 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3251 RTE_PTYPE_TUNNEL_IP |
3252 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3253 RTE_PTYPE_INNER_L4_SCTP,
3254 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3255 RTE_PTYPE_TUNNEL_IP |
3256 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3257 RTE_PTYPE_INNER_L4_ICMP,
3259 /* IPv4 --> GRE/Teredo/VXLAN */
3260 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3261 RTE_PTYPE_TUNNEL_GRENAT,
3263 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3264 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3265 RTE_PTYPE_TUNNEL_GRENAT |
3266 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3267 RTE_PTYPE_INNER_L4_FRAG,
3268 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3269 RTE_PTYPE_TUNNEL_GRENAT |
3270 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3271 RTE_PTYPE_INNER_L4_NONFRAG,
3272 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3273 RTE_PTYPE_TUNNEL_GRENAT |
3274 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3275 RTE_PTYPE_INNER_L4_UDP,
3277 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3278 RTE_PTYPE_TUNNEL_GRENAT |
3279 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3280 RTE_PTYPE_INNER_L4_TCP,
3281 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3282 RTE_PTYPE_TUNNEL_GRENAT |
3283 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3284 RTE_PTYPE_INNER_L4_SCTP,
3285 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3286 RTE_PTYPE_TUNNEL_GRENAT |
3287 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3288 RTE_PTYPE_INNER_L4_ICMP,
3290 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3291 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3292 RTE_PTYPE_TUNNEL_GRENAT |
3293 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3294 RTE_PTYPE_INNER_L4_FRAG,
3295 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3296 RTE_PTYPE_TUNNEL_GRENAT |
3297 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3298 RTE_PTYPE_INNER_L4_NONFRAG,
3299 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3300 RTE_PTYPE_TUNNEL_GRENAT |
3301 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3302 RTE_PTYPE_INNER_L4_UDP,
3304 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3305 RTE_PTYPE_TUNNEL_GRENAT |
3306 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3307 RTE_PTYPE_INNER_L4_TCP,
3308 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3309 RTE_PTYPE_TUNNEL_GRENAT |
3310 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3311 RTE_PTYPE_INNER_L4_SCTP,
3312 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3313 RTE_PTYPE_TUNNEL_GRENAT |
3314 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3315 RTE_PTYPE_INNER_L4_ICMP,
3317 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3318 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3319 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3321 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3322 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3323 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3324 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3325 RTE_PTYPE_INNER_L4_FRAG,
3326 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3327 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3328 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3329 RTE_PTYPE_INNER_L4_NONFRAG,
3330 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3331 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3332 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3333 RTE_PTYPE_INNER_L4_UDP,
3335 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3336 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3337 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3338 RTE_PTYPE_INNER_L4_TCP,
3339 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3340 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3341 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3342 RTE_PTYPE_INNER_L4_SCTP,
3343 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3344 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3345 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3346 RTE_PTYPE_INNER_L4_ICMP,
3348 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3349 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3350 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3351 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3352 RTE_PTYPE_INNER_L4_FRAG,
3353 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3354 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3355 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3356 RTE_PTYPE_INNER_L4_NONFRAG,
3357 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3358 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3359 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3360 RTE_PTYPE_INNER_L4_UDP,
3362 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3363 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3364 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3365 RTE_PTYPE_INNER_L4_TCP,
3366 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3367 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3368 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3369 RTE_PTYPE_INNER_L4_SCTP,
3370 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3371 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3372 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3373 RTE_PTYPE_INNER_L4_ICMP,
3374 /* [73] - [87] reserved */
3376 /* Non tunneled IPv6 */
3377 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3379 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3380 RTE_PTYPE_L4_NONFRAG,
3381 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3384 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3386 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3388 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3392 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3393 RTE_PTYPE_TUNNEL_IP |
3394 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3395 RTE_PTYPE_INNER_L4_FRAG,
3396 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3397 RTE_PTYPE_TUNNEL_IP |
3398 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3399 RTE_PTYPE_INNER_L4_NONFRAG,
3400 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3401 RTE_PTYPE_TUNNEL_IP |
3402 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3403 RTE_PTYPE_INNER_L4_UDP,
3405 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3406 RTE_PTYPE_TUNNEL_IP |
3407 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3408 RTE_PTYPE_INNER_L4_TCP,
3409 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3410 RTE_PTYPE_TUNNEL_IP |
3411 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3412 RTE_PTYPE_INNER_L4_SCTP,
3413 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3414 RTE_PTYPE_TUNNEL_IP |
3415 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3416 RTE_PTYPE_INNER_L4_ICMP,
3419 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3420 RTE_PTYPE_TUNNEL_IP |
3421 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3422 RTE_PTYPE_INNER_L4_FRAG,
3423 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3424 RTE_PTYPE_TUNNEL_IP |
3425 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3426 RTE_PTYPE_INNER_L4_NONFRAG,
3427 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3428 RTE_PTYPE_TUNNEL_IP |
3429 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3430 RTE_PTYPE_INNER_L4_UDP,
3431 /* [105] reserved */
3432 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3433 RTE_PTYPE_TUNNEL_IP |
3434 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3435 RTE_PTYPE_INNER_L4_TCP,
3436 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3437 RTE_PTYPE_TUNNEL_IP |
3438 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3439 RTE_PTYPE_INNER_L4_SCTP,
3440 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3441 RTE_PTYPE_TUNNEL_IP |
3442 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3443 RTE_PTYPE_INNER_L4_ICMP,
3445 /* IPv6 --> GRE/Teredo/VXLAN */
3446 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3447 RTE_PTYPE_TUNNEL_GRENAT,
3449 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3450 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3451 RTE_PTYPE_TUNNEL_GRENAT |
3452 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3453 RTE_PTYPE_INNER_L4_FRAG,
3454 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3455 RTE_PTYPE_TUNNEL_GRENAT |
3456 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3457 RTE_PTYPE_INNER_L4_NONFRAG,
3458 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3459 RTE_PTYPE_TUNNEL_GRENAT |
3460 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3461 RTE_PTYPE_INNER_L4_UDP,
3462 /* [113] reserved */
3463 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3464 RTE_PTYPE_TUNNEL_GRENAT |
3465 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3466 RTE_PTYPE_INNER_L4_TCP,
3467 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3468 RTE_PTYPE_TUNNEL_GRENAT |
3469 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3470 RTE_PTYPE_INNER_L4_SCTP,
3471 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3472 RTE_PTYPE_TUNNEL_GRENAT |
3473 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3474 RTE_PTYPE_INNER_L4_ICMP,
3476 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3477 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3478 RTE_PTYPE_TUNNEL_GRENAT |
3479 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3480 RTE_PTYPE_INNER_L4_FRAG,
3481 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3482 RTE_PTYPE_TUNNEL_GRENAT |
3483 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3484 RTE_PTYPE_INNER_L4_NONFRAG,
3485 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3486 RTE_PTYPE_TUNNEL_GRENAT |
3487 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3488 RTE_PTYPE_INNER_L4_UDP,
3489 /* [120] reserved */
3490 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3491 RTE_PTYPE_TUNNEL_GRENAT |
3492 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3493 RTE_PTYPE_INNER_L4_TCP,
3494 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3495 RTE_PTYPE_TUNNEL_GRENAT |
3496 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3497 RTE_PTYPE_INNER_L4_SCTP,
3498 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3499 RTE_PTYPE_TUNNEL_GRENAT |
3500 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3501 RTE_PTYPE_INNER_L4_ICMP,
3503 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3504 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3505 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3507 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3508 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3509 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3510 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3511 RTE_PTYPE_INNER_L4_FRAG,
3512 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3513 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3514 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3515 RTE_PTYPE_INNER_L4_NONFRAG,
3516 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3517 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3518 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3519 RTE_PTYPE_INNER_L4_UDP,
3520 /* [128] reserved */
3521 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3522 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3523 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3524 RTE_PTYPE_INNER_L4_TCP,
3525 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3526 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3527 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3528 RTE_PTYPE_INNER_L4_SCTP,
3529 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3530 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3531 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3532 RTE_PTYPE_INNER_L4_ICMP,
3534 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3535 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3536 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3537 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3538 RTE_PTYPE_INNER_L4_FRAG,
3539 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3540 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3541 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3542 RTE_PTYPE_INNER_L4_NONFRAG,
3543 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3544 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3545 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3546 RTE_PTYPE_INNER_L4_UDP,
3547 /* [135] reserved */
3548 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3549 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3550 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3551 RTE_PTYPE_INNER_L4_TCP,
3552 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3553 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3554 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3555 RTE_PTYPE_INNER_L4_SCTP,
3556 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3557 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3558 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3559 RTE_PTYPE_INNER_L4_ICMP,
3560 /* [139] - [299] reserved */
3563 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3564 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3566 /* PPPoE --> IPv4 */
3567 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3568 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3570 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3571 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3572 RTE_PTYPE_L4_NONFRAG,
3573 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3574 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3576 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3577 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3579 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3580 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3582 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3583 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3586 /* PPPoE --> IPv6 */
3587 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3588 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3590 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3591 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3592 RTE_PTYPE_L4_NONFRAG,
3593 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3594 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3596 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3597 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3599 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3600 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3602 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3603 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3605 /* [314] - [324] reserved */
3607 /* IPv4/IPv6 --> GTPC/GTPU */
3608 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3609 RTE_PTYPE_TUNNEL_GTPC,
3610 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3611 RTE_PTYPE_TUNNEL_GTPC,
3612 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3613 RTE_PTYPE_TUNNEL_GTPC,
3614 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3615 RTE_PTYPE_TUNNEL_GTPC,
3616 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3617 RTE_PTYPE_TUNNEL_GTPU,
3618 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3619 RTE_PTYPE_TUNNEL_GTPU,
3621 /* IPv4 --> GTPU --> IPv4 */
3622 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3623 RTE_PTYPE_TUNNEL_GTPU |
3624 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3625 RTE_PTYPE_INNER_L4_FRAG,
3626 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3627 RTE_PTYPE_TUNNEL_GTPU |
3628 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3629 RTE_PTYPE_INNER_L4_NONFRAG,
3630 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3631 RTE_PTYPE_TUNNEL_GTPU |
3632 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3633 RTE_PTYPE_INNER_L4_UDP,
3634 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3635 RTE_PTYPE_TUNNEL_GTPU |
3636 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3637 RTE_PTYPE_INNER_L4_TCP,
3638 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3639 RTE_PTYPE_TUNNEL_GTPU |
3640 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3641 RTE_PTYPE_INNER_L4_ICMP,
3643 /* IPv6 --> GTPU --> IPv4 */
3644 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3645 RTE_PTYPE_TUNNEL_GTPU |
3646 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3647 RTE_PTYPE_INNER_L4_FRAG,
3648 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3649 RTE_PTYPE_TUNNEL_GTPU |
3650 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3651 RTE_PTYPE_INNER_L4_NONFRAG,
3652 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3653 RTE_PTYPE_TUNNEL_GTPU |
3654 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3655 RTE_PTYPE_INNER_L4_UDP,
3656 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3657 RTE_PTYPE_TUNNEL_GTPU |
3658 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3659 RTE_PTYPE_INNER_L4_TCP,
3660 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3661 RTE_PTYPE_TUNNEL_GTPU |
3662 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3663 RTE_PTYPE_INNER_L4_ICMP,
3665 /* IPv4 --> GTPU --> IPv6 */
3666 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3667 RTE_PTYPE_TUNNEL_GTPU |
3668 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3669 RTE_PTYPE_INNER_L4_FRAG,
3670 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3671 RTE_PTYPE_TUNNEL_GTPU |
3672 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3673 RTE_PTYPE_INNER_L4_NONFRAG,
3674 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3675 RTE_PTYPE_TUNNEL_GTPU |
3676 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3677 RTE_PTYPE_INNER_L4_UDP,
3678 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3679 RTE_PTYPE_TUNNEL_GTPU |
3680 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3681 RTE_PTYPE_INNER_L4_TCP,
3682 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3683 RTE_PTYPE_TUNNEL_GTPU |
3684 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3685 RTE_PTYPE_INNER_L4_ICMP,
3687 /* IPv6 --> GTPU --> IPv6 */
3688 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3689 RTE_PTYPE_TUNNEL_GTPU |
3690 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3691 RTE_PTYPE_INNER_L4_FRAG,
3692 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3693 RTE_PTYPE_TUNNEL_GTPU |
3694 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3695 RTE_PTYPE_INNER_L4_NONFRAG,
3696 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3697 RTE_PTYPE_TUNNEL_GTPU |
3698 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3699 RTE_PTYPE_INNER_L4_UDP,
3700 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3701 RTE_PTYPE_TUNNEL_GTPU |
3702 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3703 RTE_PTYPE_INNER_L4_TCP,
3704 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3705 RTE_PTYPE_TUNNEL_GTPU |
3706 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3707 RTE_PTYPE_INNER_L4_ICMP,
3708 /* All others reserved */
3711 return type_table[ptype];
3715 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3717 struct ice_adapter *ad =
3718 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3721 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3722 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
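/*
 * Illustrative sketch (not part of the driver): how an application can
 * interpret the mbuf->packet_type value that the Rx path looks up in this
 * table, using the standard RTE_PTYPE_* masks (assumes <rte_mbuf_ptype.h>,
 * <rte_ethdev.h> and <stdio.h> are available).  Hypothetical helper.
 */
static __rte_unused void
example_classify_rx_mbuf(const struct rte_mbuf *m)
{
	if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
		/* Tunneled frame: look at the inner layers. */
		if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) ==
		    RTE_PTYPE_INNER_L4_TCP)
			printf("tunneled packet with inner TCP\n");
	} else if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
		printf("plain IPv4 packet\n");
	}
}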
3725 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
3726 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
3727 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3728 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3729 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3731 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
3732 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
3733 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3734 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
3735 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
3736 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
3739 * Check the programming status descriptor in the Rx queue. This is
3740 * done after a Flow Director filter has been programmed on the Tx queue.
3744 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3746 volatile union ice_32byte_rx_desc *rxdp;
3753 rxdp = (volatile union ice_32byte_rx_desc *)
3754 (&rxq->rx_ring[rxq->rx_tail]);
3755 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
3756 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
3757 >> ICE_RXD_QW1_STATUS_S;
3759 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
3761 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
3762 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
3763 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
3764 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
3766 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
3767 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
3768 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
3769 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
3773 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
3774 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
3776 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
3780 rxdp->wb.qword1.status_error_len = 0;
3782 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
3784 if (rxq->rx_tail == 0)
3785 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3787 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
3793 #define ICE_FDIR_MAX_WAIT_US 10000
3796 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3798 struct ice_tx_queue *txq = pf->fdir.txq;
3799 struct ice_rx_queue *rxq = pf->fdir.rxq;
3800 volatile struct ice_fltr_desc *fdirdp;
3801 volatile struct ice_tx_desc *txdp;
3805 fdirdp = (volatile struct ice_fltr_desc *)
3806 (&txq->tx_ring[txq->tx_tail]);
3807 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3808 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3810 txdp = &txq->tx_ring[txq->tx_tail + 1];
3811 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3812 td_cmd = ICE_TX_DESC_CMD_EOP |
3813 ICE_TX_DESC_CMD_RS |
3814 ICE_TX_DESC_CMD_DUMMY;
3816 txdp->cmd_type_offset_bsz =
3817 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
3820 if (txq->tx_tail >= txq->nb_tx_desc)
3822 /* Update the tx tail register */
3823 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
3824 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3825 if ((txdp->cmd_type_offset_bsz &
3826 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3827 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3831 if (i >= ICE_FDIR_MAX_WAIT_US) {
3833 "Failed to program FDIR filter: time out to get DD on tx queue.");
3837 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
3840 ret = ice_check_fdir_programming_status(rxq);
3848 "Failed to program FDIR filter: programming status reported.");