1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
8 #include "rte_pmd_ice.h"
11 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
15 PKT_TX_OUTER_IP_CKSUM)
17 /* Offset of mbuf dynamic field for protocol extraction data */
18 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
20 /* Mask of mbuf dynamic flags for protocol extraction type */
21 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
22 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
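/*
 * Example (a minimal sketch, not part of the driver proper): an application
 * that enabled protocol extraction can test the per-mbuf dynamic flag and
 * read the extracted metadata through the dynamic field. Only the mask
 * variables defined above and the accessors from rte_pmd_ice.h are used;
 * the helper name itself is hypothetical.
 */
static inline uint32_t
ice_example_read_vlan_xtr_metadata(struct rte_mbuf *m)
{
	/* Dynamic field not registered: protocol extraction is disabled. */
	if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
		return 0;

	/* This mbuf does not carry VLAN extraction data. */
	if (!(m->ol_flags & rte_net_ice_dynflag_proto_xtr_vlan_mask))
		return 0;

	return *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(m);
}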
27 static inline uint64_t
28 ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
30 static uint64_t *ol_flag_map[] = {
31 [ICE_RXDID_COMMS_AUX_VLAN] =
32 &rte_net_ice_dynflag_proto_xtr_vlan_mask,
33 [ICE_RXDID_COMMS_AUX_IPV4] =
34 &rte_net_ice_dynflag_proto_xtr_ipv4_mask,
35 [ICE_RXDID_COMMS_AUX_IPV6] =
36 &rte_net_ice_dynflag_proto_xtr_ipv6_mask,
37 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
38 &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
39 [ICE_RXDID_COMMS_AUX_TCP] =
40 &rte_net_ice_dynflag_proto_xtr_tcp_mask,
44 ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
46 return ol_flag != NULL ? *ol_flag : 0ULL;
50 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
52 static uint8_t rxdid_map[] = {
53 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_GENERIC,
54 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
55 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
56 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
57 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
58 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
61 return xtr_type < RTE_DIM(rxdid_map) ?
62 rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
65 static enum ice_status
66 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
68 struct ice_vsi *vsi = rxq->vsi;
69 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
70 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
71 struct ice_rlan_ctx rx_ctx;
73 uint16_t buf_size, len;
74 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
75 uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
78 /* Set the buffer size, as header split is disabled. */
79 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
80 RTE_PKTMBUF_HEADROOM);
82 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
83 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
84 rxq->max_pkt_len = RTE_MIN(len,
85 dev->data->dev_conf.rxmode.max_rx_pkt_len);
87 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
88 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
89 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
90 PMD_DRV_LOG(ERR, "maximum packet length must "
91 "be larger than %u and smaller than %u, "
92 "as jumbo frame is enabled",
93 (uint32_t)RTE_ETHER_MAX_LEN,
94 (uint32_t)ICE_FRAME_SIZE_MAX);
98 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
99 rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
100 PMD_DRV_LOG(ERR, "maximum packet length must be "
101 "larger than %u and smaller than %u, "
102 "as jumbo frame is disabled",
103 (uint32_t)RTE_ETHER_MIN_LEN,
104 (uint32_t)RTE_ETHER_MAX_LEN);
109 memset(&rx_ctx, 0, sizeof(rx_ctx));
111 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
112 rx_ctx.qlen = rxq->nb_rx_desc;
113 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
114 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
115 rx_ctx.dtype = 0; /* No Header Split mode */
116 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
117 rx_ctx.dsize = 1; /* 32B descriptors */
119 rx_ctx.rxmax = rxq->max_pkt_len;
120 /* TPH: Transaction Layer Packet (TLP) processing hints */
121 rx_ctx.tphrdesc_ena = 1;
122 rx_ctx.tphwdesc_ena = 1;
123 rx_ctx.tphdata_ena = 1;
124 rx_ctx.tphhead_ena = 1;
125 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
126 * When the number of free descriptors goes below the lrxqthresh,
127 * an immediate interrupt is triggered.
129 rx_ctx.lrxqthresh = 2;
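/* For example: lrxqthresh = 2 means the interrupt fires once fewer than
 * 2 * 64 = 128 descriptors remain free (units of 64, as noted above).
 */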
130 /* Default to 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
133 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
135 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
137 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
138 rxq->port_id, rxq->queue_id, rxdid);
140 /* Enable Flexible Descriptors in the queue context which
141 * allows this driver to select a specific receive descriptor format
143 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
144 QRXFLXP_CNTXT_RXDID_IDX_M;
146 /* increasing context priority to pick up profile ID;
147 * default is 0x01; setting to 0x03 to ensure profile
148 * is programmed even if the previous context has the same priority
150 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
151 QRXFLXP_CNTXT_RXDID_PRIO_M;
153 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
155 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
157 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
161 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
163 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
168 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
169 RTE_PKTMBUF_HEADROOM);
171 /* Check if scattered RX needs to be used. */
172 if (rxq->max_pkt_len > buf_size)
173 dev->data->scattered_rx = 1;
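/* Illustration (hypothetical numbers): with a 2048-byte mempool data room
 * and the default 128-byte RTE_PKTMBUF_HEADROOM, buf_size is 1920, so a
 * 9000-byte jumbo frame cannot fit in a single buffer and scattered Rx
 * must be used.
 */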
175 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
177 /* Init the Rx tail register */
178 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
183 /* Allocate mbufs for all descriptors in rx queue */
185 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
187 struct ice_rx_entry *rxe = rxq->sw_ring;
191 for (i = 0; i < rxq->nb_rx_desc; i++) {
192 volatile union ice_rx_flex_desc *rxd;
193 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
195 if (unlikely(!mbuf)) {
196 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
200 rte_mbuf_refcnt_set(mbuf, 1);
202 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
204 mbuf->port = rxq->port_id;
207 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
209 rxd = &rxq->rx_ring[i];
210 rxd->read.pkt_addr = dma_addr;
211 rxd->read.hdr_addr = 0;
212 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
222 /* Free all mbufs for descriptors in rx queue */
224 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
228 if (!rxq || !rxq->sw_ring) {
229 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
233 for (i = 0; i < rxq->nb_rx_desc; i++) {
234 if (rxq->sw_ring[i].mbuf) {
235 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
236 rxq->sw_ring[i].mbuf = NULL;
239 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
240 if (rxq->rx_nb_avail == 0)
242 for (i = 0; i < rxq->rx_nb_avail; i++) {
243 struct rte_mbuf *mbuf;
245 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
246 rte_pktmbuf_free_seg(mbuf);
248 rxq->rx_nb_avail = 0;
249 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
253 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
255 rxq->rx_rel_mbufs(rxq);
258 /* Turn an Rx queue on or off
259 * @q_idx: queue index in pf scope
260 * @on: turn on or off the queue
263 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
268 /* QRX_CTRL = QRX_ENA */
269 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
272 if (reg & QRX_CTRL_QENA_STAT_M)
273 return 0; /* Already on, skip */
274 reg |= QRX_CTRL_QENA_REQ_M;
276 if (!(reg & QRX_CTRL_QENA_STAT_M))
277 return 0; /* Already off, skip */
278 reg &= ~QRX_CTRL_QENA_REQ_M;
281 /* Write the register */
282 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
283 /* Check the result. It is said that QENA_STAT
284 * follows the QENA_REQ by not more than 10 us.
285 * TODO: need to change the wait counter later
287 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
288 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
289 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
291 if ((reg & QRX_CTRL_QENA_REQ_M) &&
292 (reg & QRX_CTRL_QENA_STAT_M))
295 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
296 !(reg & QRX_CTRL_QENA_STAT_M))
301 /* Check if it is timeout */
302 if (j >= ICE_CHK_Q_ENA_COUNT) {
303 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
304 (on ? "enable" : "disable"), q_idx);
312 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
313 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
315 ice_check_rx_burst_bulk_alloc_preconditions
316 (__rte_unused struct ice_rx_queue *rxq)
321 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
322 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
323 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
324 "rxq->rx_free_thresh=%d, "
325 "ICE_RX_MAX_BURST=%d",
326 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
328 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
329 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
330 "rxq->rx_free_thresh=%d, "
331 "rxq->nb_rx_desc=%d",
332 rxq->rx_free_thresh, rxq->nb_rx_desc);
334 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
335 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
336 "rxq->nb_rx_desc=%d, "
337 "rxq->rx_free_thresh=%d",
338 rxq->nb_rx_desc, rxq->rx_free_thresh);
348 /* reset fields in ice_rx_queue back to default */
350 ice_reset_rx_queue(struct ice_rx_queue *rxq)
356 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
360 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
361 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
362 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
364 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
365 len = rxq->nb_rx_desc;
367 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
368 ((volatile char *)rxq->rx_ring)[i] = 0;
370 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
371 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
372 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
373 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
375 rxq->rx_nb_avail = 0;
376 rxq->rx_next_avail = 0;
377 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
378 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
382 rxq->pkt_first_seg = NULL;
383 rxq->pkt_last_seg = NULL;
385 rxq->rxrearm_start = 0;
390 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
392 struct ice_rx_queue *rxq;
394 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
396 PMD_INIT_FUNC_TRACE();
398 if (rx_queue_id >= dev->data->nb_rx_queues) {
399 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
400 rx_queue_id, dev->data->nb_rx_queues);
404 rxq = dev->data->rx_queues[rx_queue_id];
405 if (!rxq || !rxq->q_set) {
406 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
411 err = ice_program_hw_rx_queue(rxq);
413 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
418 err = ice_alloc_rx_queue_mbufs(rxq);
420 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
424 /* Init the RX tail register. */
425 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
427 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
429 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
432 ice_rx_queue_release_mbufs(rxq);
433 ice_reset_rx_queue(rxq);
437 dev->data->rx_queue_state[rx_queue_id] =
438 RTE_ETH_QUEUE_STATE_STARTED;
444 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
446 struct ice_rx_queue *rxq;
448 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
450 if (rx_queue_id < dev->data->nb_rx_queues) {
451 rxq = dev->data->rx_queues[rx_queue_id];
453 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
455 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
459 ice_rx_queue_release_mbufs(rxq);
460 ice_reset_rx_queue(rxq);
461 dev->data->rx_queue_state[rx_queue_id] =
462 RTE_ETH_QUEUE_STATE_STOPPED;
469 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
471 struct ice_tx_queue *txq;
475 struct ice_aqc_add_tx_qgrp txq_elem;
476 struct ice_tlan_ctx tx_ctx;
478 PMD_INIT_FUNC_TRACE();
480 if (tx_queue_id >= dev->data->nb_tx_queues) {
481 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
482 tx_queue_id, dev->data->nb_tx_queues);
486 txq = dev->data->tx_queues[tx_queue_id];
487 if (!txq || !txq->q_set) {
488 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
494 hw = ICE_VSI_TO_HW(vsi);
496 memset(&txq_elem, 0, sizeof(txq_elem));
497 memset(&tx_ctx, 0, sizeof(tx_ctx));
498 txq_elem.num_txqs = 1;
499 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
501 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
502 tx_ctx.qlen = txq->nb_tx_desc;
503 tx_ctx.pf_num = hw->pf_id;
504 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
505 tx_ctx.src_vsi = vsi->vsi_id;
506 tx_ctx.port_num = hw->port_info->lport;
507 tx_ctx.tso_ena = 1; /* tso enable */
508 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
509 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
511 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
514 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
516 /* Init the Tx tail register */
517 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
519 /* FIXME: we assume TC is always 0 here */
520 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
521 &txq_elem, sizeof(txq_elem), NULL);
523 PMD_DRV_LOG(ERR, "Failed to add lan txq");
526 /* store the schedule node id */
527 txq->q_teid = txq_elem.txqs[0].q_teid;
529 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
533 static enum ice_status
534 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
536 struct ice_vsi *vsi = rxq->vsi;
537 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
538 uint32_t rxdid = ICE_RXDID_LEGACY_1;
539 struct ice_rlan_ctx rx_ctx;
544 rxq->rx_buf_len = 1024;
546 memset(&rx_ctx, 0, sizeof(rx_ctx));
548 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
549 rx_ctx.qlen = rxq->nb_rx_desc;
550 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
551 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
552 rx_ctx.dtype = 0; /* No Header Split mode */
553 rx_ctx.dsize = 1; /* 32B descriptors */
554 rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
555 /* TPH: Transaction Layer Packet (TLP) processing hints */
556 rx_ctx.tphrdesc_ena = 1;
557 rx_ctx.tphwdesc_ena = 1;
558 rx_ctx.tphdata_ena = 1;
559 rx_ctx.tphhead_ena = 1;
560 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
561 * When the number of free descriptors goes below the lrxqthresh,
562 * an immediate interrupt is triggered.
564 rx_ctx.lrxqthresh = 2;
565 /* Default to 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
568 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
570 /* Enable Flexible Descriptors in the queue context which
571 * allows this driver to select a specific receive descriptor format
573 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
574 QRXFLXP_CNTXT_RXDID_IDX_M;
576 /* increasing context priority to pick up profile ID;
577 * default is 0x01; setting to 0x03 to ensure profile
578 * is programmed even if the previous context has the same priority
580 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
581 QRXFLXP_CNTXT_RXDID_PRIO_M;
583 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
585 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
587 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
591 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
593 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
598 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
600 /* Init the Rx tail register */
601 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
607 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
609 struct ice_rx_queue *rxq;
611 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
612 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
614 PMD_INIT_FUNC_TRACE();
617 if (!rxq || !rxq->q_set) {
618 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
623 err = ice_fdir_program_hw_rx_queue(rxq);
625 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
630 /* Init the RX tail register. */
631 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
633 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
635 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
638 ice_reset_rx_queue(rxq);
646 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
648 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
649 struct ice_tx_queue *txq;
653 struct ice_aqc_add_tx_qgrp txq_elem;
654 struct ice_tlan_ctx tx_ctx;
656 PMD_INIT_FUNC_TRACE();
659 if (!txq || !txq->q_set) {
660 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
666 hw = ICE_VSI_TO_HW(vsi);
668 memset(&txq_elem, 0, sizeof(txq_elem));
669 memset(&tx_ctx, 0, sizeof(tx_ctx));
670 txq_elem.num_txqs = 1;
671 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
673 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
674 tx_ctx.qlen = txq->nb_tx_desc;
675 tx_ctx.pf_num = hw->pf_id;
676 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
677 tx_ctx.src_vsi = vsi->vsi_id;
678 tx_ctx.port_num = hw->port_info->lport;
679 tx_ctx.tso_ena = 1; /* tso enable */
680 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
681 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
683 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
686 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
689 /* Init the Tx tail register */
689 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
691 /* FIXME: we assume TC is always 0 here */
692 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
693 &txq_elem, sizeof(txq_elem), NULL);
695 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
698 /* store the schedule node id */
699 txq->q_teid = txq_elem.txqs[0].q_teid;
704 /* Free all mbufs for descriptors in tx queue */
706 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
710 if (!txq || !txq->sw_ring) {
711 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
715 for (i = 0; i < txq->nb_tx_desc; i++) {
716 if (txq->sw_ring[i].mbuf) {
717 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
718 txq->sw_ring[i].mbuf = NULL;
723 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
725 txq->tx_rel_mbufs(txq);
729 ice_reset_tx_queue(struct ice_tx_queue *txq)
731 struct ice_tx_entry *txe;
732 uint16_t i, prev, size;
735 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
740 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
741 for (i = 0; i < size; i++)
742 ((volatile char *)txq->tx_ring)[i] = 0;
744 prev = (uint16_t)(txq->nb_tx_desc - 1);
745 for (i = 0; i < txq->nb_tx_desc; i++) {
746 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
748 txd->cmd_type_offset_bsz =
749 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
752 txe[prev].next_id = i;
756 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
757 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
762 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
763 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
767 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
769 struct ice_tx_queue *txq;
770 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
771 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
772 struct ice_vsi *vsi = pf->main_vsi;
773 enum ice_status status;
776 uint16_t q_handle = tx_queue_id;
778 if (tx_queue_id >= dev->data->nb_tx_queues) {
779 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
780 tx_queue_id, dev->data->nb_tx_queues);
784 txq = dev->data->tx_queues[tx_queue_id];
786 PMD_DRV_LOG(ERR, "TX queue %u is not available",
791 q_ids[0] = txq->reg_idx;
792 q_teids[0] = txq->q_teid;
794 /* FIXME: we assume TC is always 0 here */
795 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
796 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
797 if (status != ICE_SUCCESS) {
798 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
802 ice_tx_queue_release_mbufs(txq);
803 ice_reset_tx_queue(txq);
804 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
810 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
812 struct ice_rx_queue *rxq;
814 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
815 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
819 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
821 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
825 ice_rx_queue_release_mbufs(rxq);
831 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
833 struct ice_tx_queue *txq;
834 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
835 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
836 struct ice_vsi *vsi = pf->main_vsi;
837 enum ice_status status;
840 uint16_t q_handle = tx_queue_id;
844 PMD_DRV_LOG(ERR, "TX queue %u is not available",
850 q_ids[0] = txq->reg_idx;
851 q_teids[0] = txq->q_teid;
853 /* FIXME: we assume TC is always 0 here */
854 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
855 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
856 if (status != ICE_SUCCESS) {
857 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
861 ice_tx_queue_release_mbufs(txq);
867 ice_rx_queue_setup(struct rte_eth_dev *dev,
870 unsigned int socket_id,
871 const struct rte_eth_rxconf *rx_conf,
872 struct rte_mempool *mp)
874 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
875 struct ice_adapter *ad =
876 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
877 struct ice_vsi *vsi = pf->main_vsi;
878 struct ice_rx_queue *rxq;
879 const struct rte_memzone *rz;
882 int use_def_burst_func = 1;
884 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
885 nb_desc > ICE_MAX_RING_DESC ||
886 nb_desc < ICE_MIN_RING_DESC) {
887 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
892 /* Free memory if needed */
893 if (dev->data->rx_queues[queue_idx]) {
894 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
895 dev->data->rx_queues[queue_idx] = NULL;
898 /* Allocate the rx queue data structure */
899 rxq = rte_zmalloc_socket(NULL,
900 sizeof(struct ice_rx_queue),
904 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
905 "rx queue data structure");
909 rxq->nb_rx_desc = nb_desc;
910 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
911 rxq->queue_id = queue_idx;
913 rxq->reg_idx = vsi->base_queue + queue_idx;
914 rxq->port_id = dev->data->port_id;
915 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
916 rxq->crc_len = RTE_ETHER_CRC_LEN;
920 rxq->drop_en = rx_conf->rx_drop_en;
922 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
923 rxq->proto_xtr = pf->proto_xtr != NULL ?
924 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
926 /* Allocate the maximum number of RX ring hardware descriptors. */
927 len = ICE_MAX_RING_DESC;
929 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
931 * Allocating a little more memory because vectorized/bulk_alloc Rx
932 * functions don't check boundaries each time.
934 len += ICE_RX_MAX_BURST;
937 /* Allocate the maximum number of RX ring hardware descriptors. */
938 ring_size = sizeof(union ice_rx_flex_desc) * len;
939 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
940 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
941 ring_size, ICE_RING_BASE_ALIGN,
944 ice_rx_queue_release(rxq);
945 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
949 /* Zero all the descriptors in the ring. */
950 memset(rz->addr, 0, ring_size);
952 rxq->rx_ring_dma = rz->iova;
953 rxq->rx_ring = rz->addr;
955 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
956 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
961 /* Allocate the software ring. */
962 rxq->sw_ring = rte_zmalloc_socket(NULL,
963 sizeof(struct ice_rx_entry) * len,
967 ice_rx_queue_release(rxq);
968 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
972 ice_reset_rx_queue(rxq);
974 dev->data->rx_queues[queue_idx] = rxq;
975 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
977 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
979 if (!use_def_burst_func) {
980 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
981 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
982 "satisfied. Rx Burst Bulk Alloc function will be "
983 "used on port=%d, queue=%d.",
984 rxq->port_id, rxq->queue_id);
985 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
987 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
988 "not satisfied, Scattered Rx is requested, "
989 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
990 "not enabled on port=%d, queue=%d.",
991 rxq->port_id, rxq->queue_id);
992 ad->rx_bulk_alloc_allowed = false;
999 ice_rx_queue_release(void *rxq)
1001 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1004 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1008 ice_rx_queue_release_mbufs(q);
1009 rte_free(q->sw_ring);
1014 ice_tx_queue_setup(struct rte_eth_dev *dev,
1017 unsigned int socket_id,
1018 const struct rte_eth_txconf *tx_conf)
1020 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1021 struct ice_vsi *vsi = pf->main_vsi;
1022 struct ice_tx_queue *txq;
1023 const struct rte_memzone *tz;
1025 uint16_t tx_rs_thresh, tx_free_thresh;
1028 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1030 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1031 nb_desc > ICE_MAX_RING_DESC ||
1032 nb_desc < ICE_MIN_RING_DESC) {
1033 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1034 "invalid", nb_desc);
1039 * The following two parameters control the setting of the RS bit on
1040 * transmit descriptors. TX descriptors will have their RS bit set
1041 * after txq->tx_rs_thresh descriptors have been used. The TX
1042 * descriptor ring will be cleaned after txq->tx_free_thresh
1043 * descriptors are used or if the number of descriptors required to
1044 * transmit a packet is greater than the number of free TX descriptors.
1046 * The following constraints must be satisfied:
1047 * - tx_rs_thresh must be greater than 0.
1048 * - tx_rs_thresh must be less than the size of the ring minus 2.
1049 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1050 * - tx_rs_thresh must be a divisor of the ring size.
1051 * - tx_free_thresh must be greater than 0.
1052 * - tx_free_thresh must be less than the size of the ring minus 3.
1053 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1055 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1056 * race condition, hence the maximum threshold constraints. When set
1057 * to zero, the default values are used.
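* For example (hypothetical values): nb_desc = 1024, tx_free_thresh = 64 and
* tx_rs_thresh = 32 satisfy every constraint above: 32 > 0, 32 < 1022,
* 32 <= 64, 1024 % 32 == 0, 64 < 1021 and 32 + 64 <= 1024.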
1059 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1060 tx_conf->tx_free_thresh :
1061 ICE_DEFAULT_TX_FREE_THRESH);
1062 /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh. */
1064 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1065 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1066 if (tx_conf->tx_rs_thresh)
1067 tx_rs_thresh = tx_conf->tx_rs_thresh;
1068 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1069 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1070 "exceed nb_desc. (tx_rs_thresh=%u "
1071 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1072 (unsigned int)tx_rs_thresh,
1073 (unsigned int)tx_free_thresh,
1074 (unsigned int)nb_desc,
1075 (int)dev->data->port_id,
1079 if (tx_rs_thresh >= (nb_desc - 2)) {
1080 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1081 "number of TX descriptors minus 2. "
1082 "(tx_rs_thresh=%u port=%d queue=%d)",
1083 (unsigned int)tx_rs_thresh,
1084 (int)dev->data->port_id,
1088 if (tx_free_thresh >= (nb_desc - 3)) {
1089 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1090 "tx_free_thresh must be less than the "
1091 "number of TX descriptors minus 3. "
1092 "(tx_free_thresh=%u port=%d queue=%d)",
1093 (unsigned int)tx_free_thresh,
1094 (int)dev->data->port_id,
1098 if (tx_rs_thresh > tx_free_thresh) {
1099 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1100 "equal to tx_free_thresh. (tx_free_thresh=%u"
1101 " tx_rs_thresh=%u port=%d queue=%d)",
1102 (unsigned int)tx_free_thresh,
1103 (unsigned int)tx_rs_thresh,
1104 (int)dev->data->port_id,
1108 if ((nb_desc % tx_rs_thresh) != 0) {
1109 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1110 "number of TX descriptors. (tx_rs_thresh=%u"
1111 " port=%d queue=%d)",
1112 (unsigned int)tx_rs_thresh,
1113 (int)dev->data->port_id,
1117 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1118 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1119 "tx_rs_thresh is greater than 1. "
1120 "(tx_rs_thresh=%u port=%d queue=%d)",
1121 (unsigned int)tx_rs_thresh,
1122 (int)dev->data->port_id,
1127 /* Free memory if needed. */
1128 if (dev->data->tx_queues[queue_idx]) {
1129 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1130 dev->data->tx_queues[queue_idx] = NULL;
1133 /* Allocate the TX queue data structure. */
1134 txq = rte_zmalloc_socket(NULL,
1135 sizeof(struct ice_tx_queue),
1136 RTE_CACHE_LINE_SIZE,
1139 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1140 "tx queue structure");
1144 /* Allocate TX hardware ring descriptors. */
1145 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1146 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1147 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1148 ring_size, ICE_RING_BASE_ALIGN,
1151 ice_tx_queue_release(txq);
1152 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1156 txq->nb_tx_desc = nb_desc;
1157 txq->tx_rs_thresh = tx_rs_thresh;
1158 txq->tx_free_thresh = tx_free_thresh;
1159 txq->pthresh = tx_conf->tx_thresh.pthresh;
1160 txq->hthresh = tx_conf->tx_thresh.hthresh;
1161 txq->wthresh = tx_conf->tx_thresh.wthresh;
1162 txq->queue_id = queue_idx;
1164 txq->reg_idx = vsi->base_queue + queue_idx;
1165 txq->port_id = dev->data->port_id;
1166 txq->offloads = offloads;
1168 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1170 txq->tx_ring_dma = tz->iova;
1171 txq->tx_ring = tz->addr;
1173 /* Allocate software ring */
1175 rte_zmalloc_socket(NULL,
1176 sizeof(struct ice_tx_entry) * nb_desc,
1177 RTE_CACHE_LINE_SIZE,
1179 if (!txq->sw_ring) {
1180 ice_tx_queue_release(txq);
1181 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1185 ice_reset_tx_queue(txq);
1187 dev->data->tx_queues[queue_idx] = txq;
1188 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1189 ice_set_tx_function_flag(dev, txq);
1195 ice_tx_queue_release(void *txq)
1197 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1200 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1204 ice_tx_queue_release_mbufs(q);
1205 rte_free(q->sw_ring);
1210 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1211 struct rte_eth_rxq_info *qinfo)
1213 struct ice_rx_queue *rxq;
1215 rxq = dev->data->rx_queues[queue_id];
1217 qinfo->mp = rxq->mp;
1218 qinfo->scattered_rx = dev->data->scattered_rx;
1219 qinfo->nb_desc = rxq->nb_rx_desc;
1221 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1222 qinfo->conf.rx_drop_en = rxq->drop_en;
1223 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1227 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1228 struct rte_eth_txq_info *qinfo)
1230 struct ice_tx_queue *txq;
1232 txq = dev->data->tx_queues[queue_id];
1234 qinfo->nb_desc = txq->nb_tx_desc;
1236 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1237 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1238 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1240 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1241 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1242 qinfo->conf.offloads = txq->offloads;
1243 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1247 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1249 #define ICE_RXQ_SCAN_INTERVAL 4
1250 volatile union ice_rx_flex_desc *rxdp;
1251 struct ice_rx_queue *rxq;
1254 rxq = dev->data->rx_queues[rx_queue_id];
1255 rxdp = &rxq->rx_ring[rxq->rx_tail];
1256 while ((desc < rxq->nb_rx_desc) &&
1257 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1258 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1260 * Check the DD bit of every 4th Rx descriptor to avoid checking
1261 * too frequently and degrading performance.
1264 desc += ICE_RXQ_SCAN_INTERVAL;
1265 rxdp += ICE_RXQ_SCAN_INTERVAL;
1266 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1267 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1268 desc - rxq->nb_rx_desc]);
1274 #define ICE_RX_FLEX_ERR0_BITS \
1275 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1276 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1277 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1278 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1279 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1280 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1282 /* Rx L3/L4 checksum */
1283 static inline uint64_t
1284 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1288 /* check if HW has decoded the packet and checksum */
1289 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1292 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1293 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1297 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1298 flags |= PKT_RX_IP_CKSUM_BAD;
1300 flags |= PKT_RX_IP_CKSUM_GOOD;
1302 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1303 flags |= PKT_RX_L4_CKSUM_BAD;
1305 flags |= PKT_RX_L4_CKSUM_GOOD;
1307 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1308 flags |= PKT_RX_EIP_CKSUM_BAD;
1314 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1316 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1317 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1318 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1320 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1321 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1322 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1327 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1328 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1329 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1330 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1331 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1332 mb->vlan_tci_outer = mb->vlan_tci;
1333 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1334 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1335 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1336 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1338 mb->vlan_tci_outer = 0;
1341 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1342 mb->vlan_tci, mb->vlan_tci_outer);
1345 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1346 #define ICE_RX_PROTO_XTR_VALID \
1347 ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
1348 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
1351 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
1352 volatile struct ice_32b_rx_flex_desc_comms *desc)
1354 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
1358 if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
1361 ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
1362 if (unlikely(!ol_flag))
1365 mb->ol_flags |= ol_flag;
1367 metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
1368 rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
1370 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
1371 metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
1373 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
1378 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
1379 volatile union ice_rx_flex_desc *rxdp)
1381 volatile struct ice_32b_rx_flex_desc_comms *desc =
1382 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
1385 stat_err = rte_le_to_cpu_16(desc->status_error0);
1386 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
1387 mb->ol_flags |= PKT_RX_RSS_HASH;
1388 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
1391 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1392 if (desc->flow_id != 0xFFFFFFFF) {
1393 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1394 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
1397 if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
1398 ice_rxd_to_proto_xtr(mb, desc);
1402 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1403 #define ICE_LOOK_AHEAD 8
1404 #if (ICE_LOOK_AHEAD != 8)
1405 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1408 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1410 volatile union ice_rx_flex_desc *rxdp;
1411 struct ice_rx_entry *rxep;
1412 struct rte_mbuf *mb;
1415 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1416 int32_t i, j, nb_rx = 0;
1417 uint64_t pkt_flags = 0;
1418 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1420 rxdp = &rxq->rx_ring[rxq->rx_tail];
1421 rxep = &rxq->sw_ring[rxq->rx_tail];
1423 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1425 /* Make sure there is at least 1 packet to receive */
1426 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1430 * Scan LOOK_AHEAD descriptors at a time to determine which
1431 * descriptors reference packets that are ready to be received.
1433 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1434 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1435 /* Read desc statuses backwards to avoid race condition */
1436 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1437 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1441 /* Compute how many status bits were set */
1442 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1443 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1447 /* Translate descriptor info to mbuf parameters */
1448 for (j = 0; j < nb_dd; j++) {
1450 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1451 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1452 mb->data_len = pkt_len;
1453 mb->pkt_len = pkt_len;
1455 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1456 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1457 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1458 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1459 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1460 ice_rxd_to_pkt_fields(mb, &rxdp[j]);
1462 mb->ol_flags |= pkt_flags;
1465 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1466 rxq->rx_stage[i + j] = rxep[j].mbuf;
1468 if (nb_dd != ICE_LOOK_AHEAD)
1472 /* Clear software ring entries */
1473 for (i = 0; i < nb_rx; i++)
1474 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1476 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1477 "port_id=%u, queue_id=%u, nb_rx=%d",
1478 rxq->port_id, rxq->queue_id, nb_rx);
1483 static inline uint16_t
1484 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1485 struct rte_mbuf **rx_pkts,
1489 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1491 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1493 for (i = 0; i < nb_pkts; i++)
1494 rx_pkts[i] = stage[i];
1496 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1497 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1503 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1505 volatile union ice_rx_flex_desc *rxdp;
1506 struct ice_rx_entry *rxep;
1507 struct rte_mbuf *mb;
1508 uint16_t alloc_idx, i;
1512 /* Allocate buffers in bulk */
1513 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1514 (rxq->rx_free_thresh - 1));
1515 rxep = &rxq->sw_ring[alloc_idx];
1516 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1517 rxq->rx_free_thresh);
1518 if (unlikely(diag != 0)) {
1519 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1523 rxdp = &rxq->rx_ring[alloc_idx];
1524 for (i = 0; i < rxq->rx_free_thresh; i++) {
1525 if (likely(i < (rxq->rx_free_thresh - 1)))
1526 /* Prefetch next mbuf */
1527 rte_prefetch0(rxep[i + 1].mbuf);
1530 rte_mbuf_refcnt_set(mb, 1);
1532 mb->data_off = RTE_PKTMBUF_HEADROOM;
1534 mb->port = rxq->port_id;
1535 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1536 rxdp[i].read.hdr_addr = 0;
1537 rxdp[i].read.pkt_addr = dma_addr;
1540 /* Update the Rx tail register */
1541 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1543 rxq->rx_free_trigger =
1544 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1545 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1546 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
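/* Illustration: with rx_free_thresh = 32, buffers are refilled in bursts of
 * 32 and rx_free_trigger advances 31 -> 63 -> 95 -> ..., wrapping back to
 * 31 after it reaches the end of the ring (see ice_reset_rx_queue()).
 */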
1551 static inline uint16_t
1552 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1554 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1556 struct rte_eth_dev *dev;
1561 if (rxq->rx_nb_avail)
1562 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1564 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1565 rxq->rx_next_avail = 0;
1566 rxq->rx_nb_avail = nb_rx;
1567 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1569 if (rxq->rx_tail > rxq->rx_free_trigger) {
1570 if (ice_rx_alloc_bufs(rxq) != 0) {
1573 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1574 dev->data->rx_mbuf_alloc_failed +=
1575 rxq->rx_free_thresh;
1576 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1577 "port_id=%u, queue_id=%u",
1578 rxq->port_id, rxq->queue_id);
1579 rxq->rx_nb_avail = 0;
1580 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1581 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1582 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1588 if (rxq->rx_tail >= rxq->nb_rx_desc)
1591 if (rxq->rx_nb_avail)
1592 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1598 ice_recv_pkts_bulk_alloc(void *rx_queue,
1599 struct rte_mbuf **rx_pkts,
1606 if (unlikely(nb_pkts == 0))
1609 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1610 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1613 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1614 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1615 nb_rx = (uint16_t)(nb_rx + count);
1616 nb_pkts = (uint16_t)(nb_pkts - count);
1625 ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
1626 struct rte_mbuf __rte_unused **rx_pkts,
1627 uint16_t __rte_unused nb_pkts)
1631 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
1634 ice_recv_scattered_pkts(void *rx_queue,
1635 struct rte_mbuf **rx_pkts,
1638 struct ice_rx_queue *rxq = rx_queue;
1639 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1640 volatile union ice_rx_flex_desc *rxdp;
1641 union ice_rx_flex_desc rxd;
1642 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1643 struct ice_rx_entry *rxe;
1644 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1645 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1646 struct rte_mbuf *nmb; /* newly allocated mbuf */
1647 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1648 uint16_t rx_id = rxq->rx_tail;
1650 uint16_t nb_hold = 0;
1651 uint16_t rx_packet_len;
1652 uint16_t rx_stat_err0;
1655 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1656 struct rte_eth_dev *dev;
1658 while (nb_rx < nb_pkts) {
1659 rxdp = &rx_ring[rx_id];
1660 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1662 /* Check the DD bit first */
1663 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1667 nmb = rte_mbuf_raw_alloc(rxq->mp);
1668 if (unlikely(!nmb)) {
1669 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1670 dev->data->rx_mbuf_alloc_failed++;
1673 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1676 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1678 if (unlikely(rx_id == rxq->nb_rx_desc))
1681 /* Prefetch next mbuf */
1682 rte_prefetch0(sw_ring[rx_id].mbuf);
1685 * When the next RX descriptor is on a cache line boundary,
1686 * prefetch the next 4 RX descriptors and next 8 pointers
1689 if ((rx_id & 0x3) == 0) {
1690 rte_prefetch0(&rx_ring[rx_id]);
1691 rte_prefetch0(&sw_ring[rx_id]);
1697 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1699 /* Set data buffer address and data length of the mbuf */
1700 rxdp->read.hdr_addr = 0;
1701 rxdp->read.pkt_addr = dma_addr;
1702 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1703 ICE_RX_FLX_DESC_PKT_LEN_M;
1704 rxm->data_len = rx_packet_len;
1705 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1708 * If this is the first buffer of the received packet, set the
1709 * pointer to the first mbuf of the packet and initialize its
1710 * context. Otherwise, update the total length and the number
1711 * of segments of the current scattered packet, and update the
1712 * pointer to the last mbuf of the current packet.
1716 first_seg->nb_segs = 1;
1717 first_seg->pkt_len = rx_packet_len;
1719 first_seg->pkt_len =
1720 (uint16_t)(first_seg->pkt_len +
1722 first_seg->nb_segs++;
1723 last_seg->next = rxm;
1727 * If this is not the last buffer of the received packet,
1728 * update the pointer to the last mbuf of the current scattered
1729 * packet and continue to parse the RX ring.
1731 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1737 * This is the last buffer of the received packet. If the CRC
1738 * is not stripped by the hardware:
1739 * - Subtract the CRC length from the total packet length.
1740 * - If the last buffer only contains the whole CRC or a part
1741 * of it, free the mbuf associated to the last buffer. If part
1742 * of the CRC is also contained in the previous mbuf, subtract
1743 * the length of that CRC part from the data length of the
1747 if (unlikely(rxq->crc_len > 0)) {
1748 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1749 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1750 rte_pktmbuf_free_seg(rxm);
1751 first_seg->nb_segs--;
1752 last_seg->data_len =
1753 (uint16_t)(last_seg->data_len -
1754 (RTE_ETHER_CRC_LEN - rx_packet_len));
1755 last_seg->next = NULL;
1757 rxm->data_len = (uint16_t)(rx_packet_len -
1761 first_seg->port = rxq->port_id;
1762 first_seg->ol_flags = 0;
1763 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1764 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1765 ice_rxd_to_vlan_tci(first_seg, &rxd);
1766 ice_rxd_to_pkt_fields(first_seg, &rxd);
1767 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1768 first_seg->ol_flags |= pkt_flags;
1769 /* Prefetch data of first segment, if configured to do so. */
1770 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1771 first_seg->data_off));
1772 rx_pkts[nb_rx++] = first_seg;
1776 /* Record index of the next RX descriptor to probe. */
1777 rxq->rx_tail = rx_id;
1778 rxq->pkt_first_seg = first_seg;
1779 rxq->pkt_last_seg = last_seg;
1782 * If the number of free RX descriptors is greater than the RX free
1783 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1784 * register. Update the RDT with the value of the last processed RX
1785 * descriptor minus 1, to guarantee that the RDT register is never
1786 * equal to the RDH register, which creates a "full" ring situation
1787 * from the hardware point of view.
1789 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1790 if (nb_hold > rxq->rx_free_thresh) {
1791 rx_id = (uint16_t)(rx_id == 0 ?
1792 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1793 /* write TAIL register */
1794 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1797 rxq->nb_rx_hold = nb_hold;
1799 /* return received packet in the burst */
1804 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1806 struct ice_adapter *ad =
1807 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1808 const uint32_t *ptypes;
1810 static const uint32_t ptypes_os[] = {
1811 /* refers to ice_get_default_pkt_type() */
1813 RTE_PTYPE_L2_ETHER_TIMESYNC,
1814 RTE_PTYPE_L2_ETHER_LLDP,
1815 RTE_PTYPE_L2_ETHER_ARP,
1816 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1817 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1820 RTE_PTYPE_L4_NONFRAG,
1824 RTE_PTYPE_TUNNEL_GRENAT,
1825 RTE_PTYPE_TUNNEL_IP,
1826 RTE_PTYPE_INNER_L2_ETHER,
1827 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1828 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1829 RTE_PTYPE_INNER_L4_FRAG,
1830 RTE_PTYPE_INNER_L4_ICMP,
1831 RTE_PTYPE_INNER_L4_NONFRAG,
1832 RTE_PTYPE_INNER_L4_SCTP,
1833 RTE_PTYPE_INNER_L4_TCP,
1834 RTE_PTYPE_INNER_L4_UDP,
1838 static const uint32_t ptypes_comms[] = {
1839 /* refers to ice_get_default_pkt_type() */
1841 RTE_PTYPE_L2_ETHER_TIMESYNC,
1842 RTE_PTYPE_L2_ETHER_LLDP,
1843 RTE_PTYPE_L2_ETHER_ARP,
1844 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1845 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1848 RTE_PTYPE_L4_NONFRAG,
1852 RTE_PTYPE_TUNNEL_GRENAT,
1853 RTE_PTYPE_TUNNEL_IP,
1854 RTE_PTYPE_INNER_L2_ETHER,
1855 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1856 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1857 RTE_PTYPE_INNER_L4_FRAG,
1858 RTE_PTYPE_INNER_L4_ICMP,
1859 RTE_PTYPE_INNER_L4_NONFRAG,
1860 RTE_PTYPE_INNER_L4_SCTP,
1861 RTE_PTYPE_INNER_L4_TCP,
1862 RTE_PTYPE_INNER_L4_UDP,
1863 RTE_PTYPE_TUNNEL_GTPC,
1864 RTE_PTYPE_TUNNEL_GTPU,
1865 RTE_PTYPE_L2_ETHER_PPPOE,
1869 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1870 ptypes = ptypes_comms;
1874 if (dev->rx_pkt_burst == ice_recv_pkts ||
1875 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1876 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1878 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1882 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1883 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1884 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1885 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1893 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1895 volatile union ice_rx_flex_desc *rxdp;
1896 struct ice_rx_queue *rxq = rx_queue;
1899 if (unlikely(offset >= rxq->nb_rx_desc))
1902 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1903 return RTE_ETH_RX_DESC_UNAVAIL;
1905 desc = rxq->rx_tail + offset;
1906 if (desc >= rxq->nb_rx_desc)
1907 desc -= rxq->nb_rx_desc;
1909 rxdp = &rxq->rx_ring[desc];
1910 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1911 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1912 return RTE_ETH_RX_DESC_DONE;
1914 return RTE_ETH_RX_DESC_AVAIL;
1918 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1920 struct ice_tx_queue *txq = tx_queue;
1921 volatile uint64_t *status;
1922 uint64_t mask, expect;
1925 if (unlikely(offset >= txq->nb_tx_desc))
1928 desc = txq->tx_tail + offset;
1929 /* go to next desc that has the RS bit */
1930 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1932 if (desc >= txq->nb_tx_desc) {
1933 desc -= txq->nb_tx_desc;
1934 if (desc >= txq->nb_tx_desc)
1935 desc -= txq->nb_tx_desc;
1938 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1939 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1940 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1941 ICE_TXD_QW1_DTYPE_S);
1942 if ((*status & mask) == expect)
1943 return RTE_ETH_TX_DESC_DONE;
1945 return RTE_ETH_TX_DESC_FULL;
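/* Worked example of the RS rounding above (hypothetical values): with
 * tx_tail = 100, offset = 10 and tx_rs_thresh = 32, the status is read from
 * descriptor ((110 + 31) / 32) * 32 = 128, i.e. the next descriptor that
 * carries the RS bit.
 */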
1949 ice_clear_queues(struct rte_eth_dev *dev)
1953 PMD_INIT_FUNC_TRACE();
1955 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1956 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
1957 ice_reset_tx_queue(dev->data->tx_queues[i]);
1960 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1961 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
1962 ice_reset_rx_queue(dev->data->rx_queues[i]);
1967 ice_free_queues(struct rte_eth_dev *dev)
1971 PMD_INIT_FUNC_TRACE();
1973 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1974 if (!dev->data->rx_queues[i])
1976 ice_rx_queue_release(dev->data->rx_queues[i]);
1977 dev->data->rx_queues[i] = NULL;
1979 dev->data->nb_rx_queues = 0;
1981 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1982 if (!dev->data->tx_queues[i])
1984 ice_tx_queue_release(dev->data->tx_queues[i]);
1985 dev->data->tx_queues[i] = NULL;
1987 dev->data->nb_tx_queues = 0;
1990 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
1991 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
1994 ice_fdir_setup_tx_resources(struct ice_pf *pf)
1996 struct ice_tx_queue *txq;
1997 const struct rte_memzone *tz = NULL;
1999 struct rte_eth_dev *dev;
2002 PMD_DRV_LOG(ERR, "PF is not available");
2006 dev = pf->adapter->eth_dev;
2008 /* Allocate the TX queue data structure. */
2009 txq = rte_zmalloc_socket("ice fdir tx queue",
2010 sizeof(struct ice_tx_queue),
2011 RTE_CACHE_LINE_SIZE,
2014 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2015 "tx queue structure.");
2019 /* Allocate TX hardware ring descriptors. */
2020 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2021 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2023 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2024 ICE_FDIR_QUEUE_ID, ring_size,
2025 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2027 ice_tx_queue_release(txq);
2028 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2032 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2033 txq->queue_id = ICE_FDIR_QUEUE_ID;
2034 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2035 txq->vsi = pf->fdir.fdir_vsi;
2037 txq->tx_ring_dma = tz->iova;
2038 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2040 * There is no need to allocate a software ring or reset for the FDIR
2041 * program queue; just mark the queue as configured.
2046 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2052 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2054 struct ice_rx_queue *rxq;
2055 const struct rte_memzone *rz = NULL;
2057 struct rte_eth_dev *dev;
2060 PMD_DRV_LOG(ERR, "PF is not available");
2064 dev = pf->adapter->eth_dev;
2066 /* Allocate the RX queue data structure. */
2067 rxq = rte_zmalloc_socket("ice fdir rx queue",
2068 sizeof(struct ice_rx_queue),
2069 RTE_CACHE_LINE_SIZE,
2072 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2073 "rx queue structure.");
2077 /* Allocate RX hardware ring descriptors. */
2078 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2079 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2081 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2082 ICE_FDIR_QUEUE_ID, ring_size,
2083 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2085 ice_rx_queue_release(rxq);
2086 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2090 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2091 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2092 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2093 rxq->vsi = pf->fdir.fdir_vsi;
2095 rxq->rx_ring_dma = rz->iova;
2096 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2097 sizeof(union ice_32byte_rx_desc));
2098 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2101 * There is no need to allocate a software ring or reset for the FDIR
2102 * Rx queue; just mark the queue as configured.
2107 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2113 ice_recv_pkts(void *rx_queue,
2114 struct rte_mbuf **rx_pkts,
2117 struct ice_rx_queue *rxq = rx_queue;
2118 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2119 volatile union ice_rx_flex_desc *rxdp;
2120 union ice_rx_flex_desc rxd;
2121 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2122 struct ice_rx_entry *rxe;
2123 struct rte_mbuf *nmb; /* newly allocated mbuf */
2124 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2125 uint16_t rx_id = rxq->rx_tail;
2127 uint16_t nb_hold = 0;
2128 uint16_t rx_packet_len;
2129 uint16_t rx_stat_err0;
2132 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2133 struct rte_eth_dev *dev;
2135 while (nb_rx < nb_pkts) {
2136 rxdp = &rx_ring[rx_id];
2137 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2139 /* Check the DD bit first */
2140 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2144 nmb = rte_mbuf_raw_alloc(rxq->mp);
2145 if (unlikely(!nmb)) {
2146 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2147 dev->data->rx_mbuf_alloc_failed++;
2150 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2153 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2155 if (unlikely(rx_id == rxq->nb_rx_desc))
2160 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2163 * Fill the read format of the descriptor with the physical address
2164 * of the newly allocated mbuf: nmb
2166 rxdp->read.hdr_addr = 0;
2167 rxdp->read.pkt_addr = dma_addr;
2169 /* calculate rx_packet_len of the received pkt */
2170 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2171 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2173 /* fill old mbuf with received descriptor: rxd */
2174 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2175 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2178 rxm->pkt_len = rx_packet_len;
2179 rxm->data_len = rx_packet_len;
2180 rxm->port = rxq->port_id;
2181 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2182 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2183 ice_rxd_to_vlan_tci(rxm, &rxd);
2184 ice_rxd_to_pkt_fields(rxm, &rxd);
2185 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2186 rxm->ol_flags |= pkt_flags;
2187 /* copy old mbuf to rx_pkts */
2188 rx_pkts[nb_rx++] = rxm;
2190 rxq->rx_tail = rx_id;
2192 * If the number of free RX descriptors is greater than the RX free
2193 * threshold of the queue, advance the receive tail register of the queue.
2194 * Update that register with the value of the last processed RX
2195 * descriptor minus 1.
2197 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2198 if (nb_hold > rxq->rx_free_thresh) {
2199 rx_id = (uint16_t)(rx_id == 0 ?
2200 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2201 /* write TAIL register */
2202 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
2205 rxq->nb_rx_hold = nb_hold;
2207 /* return received packet in the burst */
2212 ice_parse_tunneling_params(uint64_t ol_flags,
2213 union ice_tx_offload tx_offload,
2214 uint32_t *cd_tunneling)
2216 /* EIPT: External (outer) IP header type */
2217 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2218 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2219 else if (ol_flags & PKT_TX_OUTER_IPV4)
2220 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2221 else if (ol_flags & PKT_TX_OUTER_IPV6)
2222 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2224 /* EIPLEN: External (outer) IP header length, in DWords */
2225 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2226 ICE_TXD_CTX_QW0_EIPLEN_S;
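/* For example, a 20-byte outer IPv4 header without options gives
 * outer_l3_len >> 2 = 5 DWords.
 */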
2228 /* L4TUNT: L4 Tunneling Type */
2229 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2230 case PKT_TX_TUNNEL_IPIP:
2231 /* for non UDP / GRE tunneling, set to 00b */
2233 case PKT_TX_TUNNEL_VXLAN:
2234 case PKT_TX_TUNNEL_GTP:
2235 case PKT_TX_TUNNEL_GENEVE:
2236 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2238 case PKT_TX_TUNNEL_GRE:
2239 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2242 PMD_TX_LOG(ERR, "Tunnel type not supported");
2246 /* L4TUNLEN: L4 Tunneling Length, in Words
2248 * We depend on the application to set rte_mbuf.l2_len correctly.
2249 * For IP in GRE it should be set to the length of the GRE
2251 * For MAC in GRE or MAC in UDP it should be set to the length
2252 * of the GRE or UDP headers plus the inner MAC up to and including
2253 * its last Ethertype.
2254 * If MPLS labels exist, they should be included as well.
2256 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2257 ICE_TXD_CTX_QW0_NATLEN_S;
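	/*
	 * Worked example (a sketch): for a VXLAN packet with
	 * PKT_TX_OUTER_IP_CKSUM set, a 20-byte outer IPv4 header and
	 * l2_len = 8 (UDP) + 8 (VXLAN) + 14 (inner MAC) = 30 bytes, the
	 * fields above become EIPT = IPv4 with checksum, EIPLEN = 20 / 4 = 5
	 * DWords, L4TUNT = UDP tunneling and NATLEN = 30 / 2 = 15 Words.
	 */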
2259 if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2260 (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2261 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2262 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2266 ice_txd_enable_checksum(uint64_t ol_flags,
2268 uint32_t *td_offset,
2269 union ice_tx_offload tx_offload)
2272 if (ol_flags & PKT_TX_TUNNEL_MASK)
2273 *td_offset |= (tx_offload.outer_l2_len >> 1)
2274 << ICE_TX_DESC_LEN_MACLEN_S;
2276 *td_offset |= (tx_offload.l2_len >> 1)
2277 << ICE_TX_DESC_LEN_MACLEN_S;
2279 /* Enable L3 checksum offloads */
2280 if (ol_flags & PKT_TX_IP_CKSUM) {
2281 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2282 *td_offset |= (tx_offload.l3_len >> 2) <<
2283 ICE_TX_DESC_LEN_IPLEN_S;
2284 } else if (ol_flags & PKT_TX_IPV4) {
2285 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2286 *td_offset |= (tx_offload.l3_len >> 2) <<
2287 ICE_TX_DESC_LEN_IPLEN_S;
2288 } else if (ol_flags & PKT_TX_IPV6) {
2289 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2290 *td_offset |= (tx_offload.l3_len >> 2) <<
2291 ICE_TX_DESC_LEN_IPLEN_S;
2294 if (ol_flags & PKT_TX_TCP_SEG) {
2295 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2296 *td_offset |= (tx_offload.l4_len >> 2) <<
2297 ICE_TX_DESC_LEN_L4_LEN_S;
2301 /* Enable L4 checksum offloads */
2302 switch (ol_flags & PKT_TX_L4_MASK) {
2303 case PKT_TX_TCP_CKSUM:
2304 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2305 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2306 ICE_TX_DESC_LEN_L4_LEN_S;
2308 case PKT_TX_SCTP_CKSUM:
2309 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2310 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2311 ICE_TX_DESC_LEN_L4_LEN_S;
2313 case PKT_TX_UDP_CKSUM:
2314 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2315 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2316 ICE_TX_DESC_LEN_L4_LEN_S;
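	/*
	 * Note on units: MACLEN is programmed in 2-byte Words while IPLEN and
	 * L4LEN are programmed in 4-byte DWords, which is why the lengths are
	 * shifted right by 1 and by 2 above.
	 */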
2324 ice_xmit_cleanup(struct ice_tx_queue *txq)
2326 struct ice_tx_entry *sw_ring = txq->sw_ring;
2327 volatile struct ice_tx_desc *txd = txq->tx_ring;
2328 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2329 uint16_t nb_tx_desc = txq->nb_tx_desc;
2330 uint16_t desc_to_clean_to;
2331 uint16_t nb_tx_to_clean;
2333 /* Determine the last descriptor needing to be cleaned */
2334 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2335 if (desc_to_clean_to >= nb_tx_desc)
2336 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2338 /* Check to make sure the last descriptor to clean is done */
2339 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2340 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2341 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2342 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2343 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2345 txq->port_id, txq->queue_id,
2346 txd[desc_to_clean_to].cmd_type_offset_bsz);
2347 /* Failed to clean any descriptors */
2351 /* Figure out how many descriptors will be cleaned */
2352 if (last_desc_cleaned > desc_to_clean_to)
2353 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2356 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2359 /* The last descriptor to clean is done, so that means all the
2360 * descriptors from the last descriptor that was cleaned
2361 * up to the last descriptor with the RS bit set
2362 * are done. Only reset the threshold descriptor.
2364 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2366 /* Update the txq to reflect the last descriptor that was cleaned */
2367 txq->last_desc_cleaned = desc_to_clean_to;
2368 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2373 /* Build the Tx data descriptor QW1 word (type, cmd, offsets, size, tag) */
2374 static inline uint64_t
2375 ice_build_ctob(uint32_t td_cmd,
2380 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2381 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2382 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2383 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2384 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2387 /* Check if the context descriptor is needed for TX offloading */
2388 static inline uint16_t
2389 ice_calc_context_desc(uint64_t flags)
2391 static uint64_t mask = PKT_TX_TCP_SEG |
2393 PKT_TX_OUTER_IP_CKSUM |
2396 return (flags & mask) ? 1 : 0;
2399 /* set ice TSO context descriptor */
2400 static inline uint64_t
2401 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2403 uint64_t ctx_desc = 0;
2404 uint32_t cd_cmd, hdr_len, cd_tso_len;
2406 if (!tx_offload.l4_len) {
2407 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2411 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2412 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2413 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2415 cd_cmd = ICE_TX_CTX_DESC_TSO;
2416 cd_tso_len = mbuf->pkt_len - hdr_len;
2417 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2418 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2419 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
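	/*
	 * Example (a sketch): a 9014-byte TCP packet with 14 + 20 + 32 = 66
	 * bytes of headers and tso_segsz = 1448 yields cd_tso_len = 8948
	 * payload bytes, which the hardware emits as ceil(8948 / 1448) = 7
	 * MSS-sized segments.
	 */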
2425 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2427 struct ice_tx_queue *txq;
2428 volatile struct ice_tx_desc *tx_ring;
2429 volatile struct ice_tx_desc *txd;
2430 struct ice_tx_entry *sw_ring;
2431 struct ice_tx_entry *txe, *txn;
2432 struct rte_mbuf *tx_pkt;
2433 struct rte_mbuf *m_seg;
2434 uint32_t cd_tunneling_params;
2439 uint32_t td_cmd = 0;
2440 uint32_t td_offset = 0;
2441 uint32_t td_tag = 0;
2443 uint64_t buf_dma_addr;
2445 union ice_tx_offload tx_offload = {0};
2448 sw_ring = txq->sw_ring;
2449 tx_ring = txq->tx_ring;
2450 tx_id = txq->tx_tail;
2451 txe = &sw_ring[tx_id];
2453 /* Check if the descriptor ring needs to be cleaned. */
2454 if (txq->nb_tx_free < txq->tx_free_thresh)
2455 ice_xmit_cleanup(txq);
2457 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2458 tx_pkt = *tx_pkts++;
2461 ol_flags = tx_pkt->ol_flags;
2462 tx_offload.l2_len = tx_pkt->l2_len;
2463 tx_offload.l3_len = tx_pkt->l3_len;
2464 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2465 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2466 tx_offload.l4_len = tx_pkt->l4_len;
2467 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2468 /* Calculate the number of context descriptors needed. */
2469 nb_ctx = ice_calc_context_desc(ol_flags);
2471 /* The number of descriptors that must be allocated for
2472 * a packet equals the number of segments of that
2473 * packet plus one context descriptor, if needed.
2475 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
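		/* e.g. a 3-segment TSO packet needs 3 + 1 = 4 descriptors */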
2476 tx_last = (uint16_t)(tx_id + nb_used - 1);
2479 if (tx_last >= txq->nb_tx_desc)
2480 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2482 if (nb_used > txq->nb_tx_free) {
2483 if (ice_xmit_cleanup(txq) != 0) {
2488 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2489 while (nb_used > txq->nb_tx_free) {
2490 if (ice_xmit_cleanup(txq) != 0) {
2499 /* Descriptor based VLAN insertion */
2500 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2501 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2502 td_tag = tx_pkt->vlan_tci;
2505 /* Fill in tunneling parameters if necessary */
2506 cd_tunneling_params = 0;
2507 if (ol_flags & PKT_TX_TUNNEL_MASK)
2508 ice_parse_tunneling_params(ol_flags, tx_offload,
2509 &cd_tunneling_params);
2511 /* Enable checksum offloading */
2512 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
2513 ice_txd_enable_checksum(ol_flags, &td_cmd,
2514 &td_offset, tx_offload);
2518 /* Setup TX context descriptor if required */
2519 volatile struct ice_tx_ctx_desc *ctx_txd =
2520 (volatile struct ice_tx_ctx_desc *)
2522 uint16_t cd_l2tag2 = 0;
2523 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2525 txn = &sw_ring[txe->next_id];
2526 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2528 rte_pktmbuf_free_seg(txe->mbuf);
2532 if (ol_flags & PKT_TX_TCP_SEG)
2533 cd_type_cmd_tso_mss |=
2534 ice_set_tso_ctx(tx_pkt, tx_offload);
2536 ctx_txd->tunneling_params =
2537 rte_cpu_to_le_32(cd_tunneling_params);
2539 /* TX context descriptor based double VLAN insert */
2540 if (ol_flags & PKT_TX_QINQ) {
2541 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2542 cd_type_cmd_tso_mss |=
2543 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2544 ICE_TXD_CTX_QW1_CMD_S);
2546 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2548 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2550 txe->last_id = tx_last;
2551 tx_id = txe->next_id;
2557 txd = &tx_ring[tx_id];
2558 txn = &sw_ring[txe->next_id];
2561 rte_pktmbuf_free_seg(txe->mbuf);
2564 /* Setup TX Descriptor */
2565 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2566 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2567 txd->cmd_type_offset_bsz =
2568 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2569 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2570 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2571 ((uint64_t)m_seg->data_len <<
2572 ICE_TXD_QW1_TX_BUF_SZ_S) |
2573 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2575 txe->last_id = tx_last;
2576 tx_id = txe->next_id;
2578 m_seg = m_seg->next;
2581 /* fill the last descriptor with End of Packet (EOP) bit */
2582 td_cmd |= ICE_TX_DESC_CMD_EOP;
2583 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2584 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2586 /* set RS bit on the last descriptor of one packet */
2587 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2588 PMD_TX_FREE_LOG(DEBUG,
2589 "Setting RS bit on TXD id="
2590 "%4u (port=%d queue=%d)",
2591 tx_last, txq->port_id, txq->queue_id);
2593 td_cmd |= ICE_TX_DESC_CMD_RS;
2595 /* Update txq RS bit counters */
2596 txq->nb_tx_used = 0;
2598 txd->cmd_type_offset_bsz |=
2599 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2603 /* update Tail register */
2604 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2605 txq->tx_tail = tx_id;
2610 static inline int __attribute__((always_inline))
2611 ice_tx_free_bufs(struct ice_tx_queue *txq)
2613 struct ice_tx_entry *txep;
2616 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2617 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2618 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2621 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2623 for (i = 0; i < txq->tx_rs_thresh; i++)
2624 rte_prefetch0((txep + i)->mbuf);
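	/*
	 * With DEV_TX_OFFLOAD_MBUF_FAST_FREE the mbufs can be returned
	 * straight to their mempool; otherwise each one goes through
	 * rte_pktmbuf_free_seg() so reference counts and chained segments
	 * are handled correctly.
	 */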
2626 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2627 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2628 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2632 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2633 rte_pktmbuf_free_seg(txep->mbuf);
2638 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2639 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2640 if (txq->tx_next_dd >= txq->nb_tx_desc)
2641 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2643 return txq->tx_rs_thresh;
2647 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2650 struct ice_tx_entry *swr_ring = txq->sw_ring;
2651 uint16_t i, tx_last, tx_id;
2652 uint16_t nb_tx_free_last;
2653 uint16_t nb_tx_to_clean;
2656 /* Start freeing mbufs from the entry after tx_tail */
2657 tx_last = txq->tx_tail;
2658 tx_id = swr_ring[tx_last].next_id;
2660 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2663 nb_tx_to_clean = txq->nb_tx_free;
2664 nb_tx_free_last = txq->nb_tx_free;
2666 free_cnt = txq->nb_tx_desc;
2668 /* Loop through swr_ring to count the number of
2669 * freeable mbufs and packets.
2671 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2672 for (i = 0; i < nb_tx_to_clean &&
2673 pkt_cnt < free_cnt &&
2674 tx_id != tx_last; i++) {
2675 if (swr_ring[tx_id].mbuf != NULL) {
2676 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2677 swr_ring[tx_id].mbuf = NULL;
2680 * last segment in the packet,
2681 * increment packet count
2683 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2686 tx_id = swr_ring[tx_id].next_id;
2689 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2690 txq->nb_tx_free || tx_id == tx_last)
2693 if (pkt_cnt < free_cnt) {
2694 if (ice_xmit_cleanup(txq))
2697 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2698 nb_tx_free_last = txq->nb_tx_free;
2702 return (int)pkt_cnt;
2707 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2708 uint32_t free_cnt __rte_unused)
2715 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2720 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2721 free_cnt = txq->nb_tx_desc;
2723 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2725 for (i = 0; i < cnt; i += n) {
2726 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2729 n = ice_tx_free_bufs(txq);
2739 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2741 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2742 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2743 struct ice_adapter *ad =
2744 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
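	/* Dispatch to the cleanup routine that matches the Tx path in use. */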
2747 if (ad->tx_vec_allowed)
2748 return ice_tx_done_cleanup_vec(q, free_cnt);
2750 if (ad->tx_simple_allowed)
2751 return ice_tx_done_cleanup_simple(q, free_cnt);
2753 return ice_tx_done_cleanup_full(q, free_cnt);
2756 /* Populate 4 descriptors with data from 4 mbufs */
2758 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2763 for (i = 0; i < 4; i++, txdp++, pkts++) {
2764 dma_addr = rte_mbuf_data_iova(*pkts);
2765 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2766 txdp->cmd_type_offset_bsz =
2767 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2768 (*pkts)->data_len, 0);
2772 /* Populate 1 descriptor with data from 1 mbuf */
2774 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2778 dma_addr = rte_mbuf_data_iova(*pkts);
2779 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2780 txdp->cmd_type_offset_bsz =
2781 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2782 (*pkts)->data_len, 0);
2786 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2789 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2790 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2791 const int N_PER_LOOP = 4;
2792 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2793 int mainpart, leftover;
2797 * Process most of the packets in chunks of N pkts. Any
2798 * leftover packets will get processed one at a time.
2800 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2801 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2802 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2803 /* Copy N mbuf pointers to the S/W ring */
2804 for (j = 0; j < N_PER_LOOP; ++j)
2805 (txep + i + j)->mbuf = *(pkts + i + j);
2806 tx4(txdp + i, pkts + i);
2809 if (unlikely(leftover > 0)) {
2810 for (i = 0; i < leftover; ++i) {
2811 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2812 tx1(txdp + mainpart + i, pkts + mainpart + i);
2817 static inline uint16_t
2818 tx_xmit_pkts(struct ice_tx_queue *txq,
2819 struct rte_mbuf **tx_pkts,
2822 volatile struct ice_tx_desc *txr = txq->tx_ring;
2826 * Begin scanning the H/W ring for done descriptors when the number
2827 * of available descriptors drops below tx_free_thresh. For each done
2828 * descriptor, free the associated buffer.
2830 if (txq->nb_tx_free < txq->tx_free_thresh)
2831 ice_tx_free_bufs(txq);
2833 /* Use only the available descriptors */
2834 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2835 if (unlikely(!nb_pkts))
2838 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2839 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2840 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2841 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2842 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2843 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2845 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2849 /* Fill hardware descriptor ring with mbuf data */
2850 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2851 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2853 /* Determine if the RS bit needs to be set */
2854 if (txq->tx_tail > txq->tx_next_rs) {
2855 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2856 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2859 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2860 if (txq->tx_next_rs >= txq->nb_tx_desc)
2861 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
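	/*
	 * The RS (Report Status) bit is only requested every tx_rs_thresh
	 * descriptors; ice_tx_free_bufs() later polls the DD write-back at
	 * those positions to recycle transmitted mbufs in bulk.
	 */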
2864 if (txq->tx_tail >= txq->nb_tx_desc)
2867 /* Update the tx tail register */
2868 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2874 ice_xmit_pkts_simple(void *tx_queue,
2875 struct rte_mbuf **tx_pkts,
2880 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2881 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2885 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2888 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2889 &tx_pkts[nb_tx], num);
2890 nb_tx = (uint16_t)(nb_tx + ret);
2891 nb_pkts = (uint16_t)(nb_pkts - ret);
2899 void __attribute__((cold))
2900 ice_set_rx_function(struct rte_eth_dev *dev)
2902 PMD_INIT_FUNC_TRACE();
2903 struct ice_adapter *ad =
2904 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2906 struct ice_rx_queue *rxq;
2908 bool use_avx2 = false;
2910 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2911 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
2912 ad->rx_vec_allowed = true;
2913 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2914 rxq = dev->data->rx_queues[i];
2915 if (rxq && ice_rxq_vec_setup(rxq)) {
2916 ad->rx_vec_allowed = false;
2921 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2922 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2926 ad->rx_vec_allowed = false;
2930 if (ad->rx_vec_allowed) {
2931 if (dev->data->scattered_rx) {
2933 "Using %sVector Scattered Rx (port %d).",
2934 use_avx2 ? "avx2 " : "",
2935 dev->data->port_id);
2936 dev->rx_pkt_burst = use_avx2 ?
2937 ice_recv_scattered_pkts_vec_avx2 :
2938 ice_recv_scattered_pkts_vec;
2940 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2941 use_avx2 ? "avx2 " : "",
2942 dev->data->port_id);
2943 dev->rx_pkt_burst = use_avx2 ?
2944 ice_recv_pkts_vec_avx2 :
2952 if (dev->data->scattered_rx) {
2953 /* Set the non-LRO scattered function */
2955 "Using a Scattered function on port %d.",
2956 dev->data->port_id);
2957 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2958 } else if (ad->rx_bulk_alloc_allowed) {
2960 "Rx Burst Bulk Alloc Preconditions are "
2961 "satisfied. Rx Burst Bulk Alloc function "
2962 "will be used on port %d.",
2963 dev->data->port_id);
2964 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2967 "Rx Burst Bulk Alloc Preconditions are not "
2968 "satisfied, Normal Rx will be used on port %d.",
2969 dev->data->port_id);
2970 dev->rx_pkt_burst = ice_recv_pkts;
2974 static const struct {
2975 eth_rx_burst_t pkt_burst;
2977 } ice_rx_burst_infos[] = {
2978 { ice_recv_scattered_pkts, "Scalar Scattered" },
2979 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
2980 { ice_recv_pkts, "Scalar" },
2982 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
2983 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
2984 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
2985 { ice_recv_pkts_vec, "Vector SSE" },
2990 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2991 struct rte_eth_burst_mode *mode)
2993 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2997 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
2998 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
2999 snprintf(mode->info, sizeof(mode->info), "%s",
3000 ice_rx_burst_infos[i].info);
3009 void __attribute__((cold))
3010 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3012 struct ice_adapter *ad =
3013 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3015 /* Use a simple Tx queue if possible (only fast free is allowed) */
3016 ad->tx_simple_allowed =
3018 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3019 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3021 if (ad->tx_simple_allowed)
3022 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3026 "Simple Tx can NOT be enabled on Tx queue %u.",
3030 /*********************************************************************
3034 **********************************************************************/
3035 /* The default values of TSO MSS */
3036 #define ICE_MIN_TSO_MSS 64
3037 #define ICE_MAX_TSO_MSS 9728
3038 #define ICE_MAX_TSO_FRAME_SIZE 262144
3040 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3047 for (i = 0; i < nb_pkts; i++) {
3049 ol_flags = m->ol_flags;
3051 if (ol_flags & PKT_TX_TCP_SEG &&
3052 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3053 m->tso_segsz > ICE_MAX_TSO_MSS ||
3054 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3056 * An MSS outside the supported range is considered malicious
3062 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3063 ret = rte_validate_tx_offload(m);
3069 ret = rte_net_intel_cksum_prepare(m);
3078 void __attribute__((cold))
3079 ice_set_tx_function(struct rte_eth_dev *dev)
3081 struct ice_adapter *ad =
3082 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3084 struct ice_tx_queue *txq;
3086 bool use_avx2 = false;
3088 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3089 if (!ice_tx_vec_dev_check(dev)) {
3090 ad->tx_vec_allowed = true;
3091 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3092 txq = dev->data->tx_queues[i];
3093 if (txq && ice_txq_vec_setup(txq)) {
3094 ad->tx_vec_allowed = false;
3099 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3100 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
3104 ad->tx_vec_allowed = false;
3108 if (ad->tx_vec_allowed) {
3109 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3110 use_avx2 ? "avx2 " : "",
3111 dev->data->port_id);
3112 dev->tx_pkt_burst = use_avx2 ?
3113 ice_xmit_pkts_vec_avx2 :
3115 dev->tx_pkt_prepare = NULL;
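		/*
		 * tx_pkt_prepare is left unset here; ice_prep_pkts() is only
		 * installed together with the full ice_xmit_pkts path below.
		 */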
3121 if (ad->tx_simple_allowed) {
3122 PMD_INIT_LOG(DEBUG, "Simple Tx will be used.");
3123 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3124 dev->tx_pkt_prepare = NULL;
3126 PMD_INIT_LOG(DEBUG, "Normal Tx will be used.");
3127 dev->tx_pkt_burst = ice_xmit_pkts;
3128 dev->tx_pkt_prepare = ice_prep_pkts;
3132 static const struct {
3133 eth_tx_burst_t pkt_burst;
3135 } ice_tx_burst_infos[] = {
3136 { ice_xmit_pkts_simple, "Scalar Simple" },
3137 { ice_xmit_pkts, "Scalar" },
3139 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3140 { ice_xmit_pkts_vec, "Vector SSE" },
3145 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3146 struct rte_eth_burst_mode *mode)
3148 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3152 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3153 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3154 snprintf(mode->info, sizeof(mode->info), "%s",
3155 ice_tx_burst_infos[i].info);
3164 /* The hardware datasheet explains what each value means in more detail.
3166 * @note: update ice_dev_supported_ptypes_get() if anything changes here.
3168 static inline uint32_t
3169 ice_get_default_pkt_type(uint16_t ptype)
3171 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3172 __rte_cache_aligned = {
3175 [1] = RTE_PTYPE_L2_ETHER,
3176 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3177 /* [3] - [5] reserved */
3178 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3179 /* [7] - [10] reserved */
3180 [11] = RTE_PTYPE_L2_ETHER_ARP,
3181 /* [12] - [21] reserved */
3183 /* Non tunneled IPv4 */
3184 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3186 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3187 RTE_PTYPE_L4_NONFRAG,
3188 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3191 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3193 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3195 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3199 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3200 RTE_PTYPE_TUNNEL_IP |
3201 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3202 RTE_PTYPE_INNER_L4_FRAG,
3203 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3204 RTE_PTYPE_TUNNEL_IP |
3205 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3206 RTE_PTYPE_INNER_L4_NONFRAG,
3207 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3208 RTE_PTYPE_TUNNEL_IP |
3209 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3210 RTE_PTYPE_INNER_L4_UDP,
3212 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3213 RTE_PTYPE_TUNNEL_IP |
3214 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3215 RTE_PTYPE_INNER_L4_TCP,
3216 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3217 RTE_PTYPE_TUNNEL_IP |
3218 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3219 RTE_PTYPE_INNER_L4_SCTP,
3220 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3221 RTE_PTYPE_TUNNEL_IP |
3222 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3223 RTE_PTYPE_INNER_L4_ICMP,
3226 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3227 RTE_PTYPE_TUNNEL_IP |
3228 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3229 RTE_PTYPE_INNER_L4_FRAG,
3230 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3231 RTE_PTYPE_TUNNEL_IP |
3232 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3233 RTE_PTYPE_INNER_L4_NONFRAG,
3234 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3235 RTE_PTYPE_TUNNEL_IP |
3236 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3237 RTE_PTYPE_INNER_L4_UDP,
3239 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3240 RTE_PTYPE_TUNNEL_IP |
3241 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3242 RTE_PTYPE_INNER_L4_TCP,
3243 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3244 RTE_PTYPE_TUNNEL_IP |
3245 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3246 RTE_PTYPE_INNER_L4_SCTP,
3247 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3248 RTE_PTYPE_TUNNEL_IP |
3249 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3250 RTE_PTYPE_INNER_L4_ICMP,
3252 /* IPv4 --> GRE/Teredo/VXLAN */
3253 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3254 RTE_PTYPE_TUNNEL_GRENAT,
3256 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3257 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3258 RTE_PTYPE_TUNNEL_GRENAT |
3259 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3260 RTE_PTYPE_INNER_L4_FRAG,
3261 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3262 RTE_PTYPE_TUNNEL_GRENAT |
3263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3264 RTE_PTYPE_INNER_L4_NONFRAG,
3265 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3266 RTE_PTYPE_TUNNEL_GRENAT |
3267 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3268 RTE_PTYPE_INNER_L4_UDP,
3270 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3271 RTE_PTYPE_TUNNEL_GRENAT |
3272 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3273 RTE_PTYPE_INNER_L4_TCP,
3274 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3275 RTE_PTYPE_TUNNEL_GRENAT |
3276 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3277 RTE_PTYPE_INNER_L4_SCTP,
3278 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3279 RTE_PTYPE_TUNNEL_GRENAT |
3280 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3281 RTE_PTYPE_INNER_L4_ICMP,
3283 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3284 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3285 RTE_PTYPE_TUNNEL_GRENAT |
3286 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3287 RTE_PTYPE_INNER_L4_FRAG,
3288 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3289 RTE_PTYPE_TUNNEL_GRENAT |
3290 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3291 RTE_PTYPE_INNER_L4_NONFRAG,
3292 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3293 RTE_PTYPE_TUNNEL_GRENAT |
3294 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3295 RTE_PTYPE_INNER_L4_UDP,
3297 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3298 RTE_PTYPE_TUNNEL_GRENAT |
3299 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3300 RTE_PTYPE_INNER_L4_TCP,
3301 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3302 RTE_PTYPE_TUNNEL_GRENAT |
3303 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3304 RTE_PTYPE_INNER_L4_SCTP,
3305 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3306 RTE_PTYPE_TUNNEL_GRENAT |
3307 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3308 RTE_PTYPE_INNER_L4_ICMP,
3310 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3311 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3312 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3314 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3315 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3316 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3317 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3318 RTE_PTYPE_INNER_L4_FRAG,
3319 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3320 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3321 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3322 RTE_PTYPE_INNER_L4_NONFRAG,
3323 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3324 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3325 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3326 RTE_PTYPE_INNER_L4_UDP,
3328 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3329 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3330 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3331 RTE_PTYPE_INNER_L4_TCP,
3332 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3333 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3334 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3335 RTE_PTYPE_INNER_L4_SCTP,
3336 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3337 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3338 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3339 RTE_PTYPE_INNER_L4_ICMP,
3341 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3342 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3343 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3344 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3345 RTE_PTYPE_INNER_L4_FRAG,
3346 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3347 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3348 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3349 RTE_PTYPE_INNER_L4_NONFRAG,
3350 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3351 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3352 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3353 RTE_PTYPE_INNER_L4_UDP,
3355 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3356 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3357 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3358 RTE_PTYPE_INNER_L4_TCP,
3359 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3360 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3361 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3362 RTE_PTYPE_INNER_L4_SCTP,
3363 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3364 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3365 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3366 RTE_PTYPE_INNER_L4_ICMP,
3367 /* [73] - [87] reserved */
3369 /* Non tunneled IPv6 */
3370 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3372 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3373 RTE_PTYPE_L4_NONFRAG,
3374 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3377 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3379 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3381 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3385 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3386 RTE_PTYPE_TUNNEL_IP |
3387 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3388 RTE_PTYPE_INNER_L4_FRAG,
3389 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3390 RTE_PTYPE_TUNNEL_IP |
3391 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3392 RTE_PTYPE_INNER_L4_NONFRAG,
3393 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3394 RTE_PTYPE_TUNNEL_IP |
3395 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3396 RTE_PTYPE_INNER_L4_UDP,
3398 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3399 RTE_PTYPE_TUNNEL_IP |
3400 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3401 RTE_PTYPE_INNER_L4_TCP,
3402 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3403 RTE_PTYPE_TUNNEL_IP |
3404 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3405 RTE_PTYPE_INNER_L4_SCTP,
3406 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3407 RTE_PTYPE_TUNNEL_IP |
3408 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3409 RTE_PTYPE_INNER_L4_ICMP,
3412 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3413 RTE_PTYPE_TUNNEL_IP |
3414 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3415 RTE_PTYPE_INNER_L4_FRAG,
3416 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3417 RTE_PTYPE_TUNNEL_IP |
3418 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3419 RTE_PTYPE_INNER_L4_NONFRAG,
3420 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3421 RTE_PTYPE_TUNNEL_IP |
3422 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3423 RTE_PTYPE_INNER_L4_UDP,
3424 /* [105] reserved */
3425 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3426 RTE_PTYPE_TUNNEL_IP |
3427 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3428 RTE_PTYPE_INNER_L4_TCP,
3429 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3430 RTE_PTYPE_TUNNEL_IP |
3431 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3432 RTE_PTYPE_INNER_L4_SCTP,
3433 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3434 RTE_PTYPE_TUNNEL_IP |
3435 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3436 RTE_PTYPE_INNER_L4_ICMP,
3438 /* IPv6 --> GRE/Teredo/VXLAN */
3439 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3440 RTE_PTYPE_TUNNEL_GRENAT,
3442 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3443 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3444 RTE_PTYPE_TUNNEL_GRENAT |
3445 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3446 RTE_PTYPE_INNER_L4_FRAG,
3447 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3448 RTE_PTYPE_TUNNEL_GRENAT |
3449 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3450 RTE_PTYPE_INNER_L4_NONFRAG,
3451 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3452 RTE_PTYPE_TUNNEL_GRENAT |
3453 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3454 RTE_PTYPE_INNER_L4_UDP,
3455 /* [113] reserved */
3456 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3457 RTE_PTYPE_TUNNEL_GRENAT |
3458 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3459 RTE_PTYPE_INNER_L4_TCP,
3460 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3461 RTE_PTYPE_TUNNEL_GRENAT |
3462 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3463 RTE_PTYPE_INNER_L4_SCTP,
3464 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3465 RTE_PTYPE_TUNNEL_GRENAT |
3466 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3467 RTE_PTYPE_INNER_L4_ICMP,
3469 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3470 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3471 RTE_PTYPE_TUNNEL_GRENAT |
3472 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3473 RTE_PTYPE_INNER_L4_FRAG,
3474 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3475 RTE_PTYPE_TUNNEL_GRENAT |
3476 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3477 RTE_PTYPE_INNER_L4_NONFRAG,
3478 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3479 RTE_PTYPE_TUNNEL_GRENAT |
3480 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3481 RTE_PTYPE_INNER_L4_UDP,
3482 /* [120] reserved */
3483 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3484 RTE_PTYPE_TUNNEL_GRENAT |
3485 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3486 RTE_PTYPE_INNER_L4_TCP,
3487 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3488 RTE_PTYPE_TUNNEL_GRENAT |
3489 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3490 RTE_PTYPE_INNER_L4_SCTP,
3491 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3492 RTE_PTYPE_TUNNEL_GRENAT |
3493 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3494 RTE_PTYPE_INNER_L4_ICMP,
3496 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3497 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3498 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3500 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3501 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3502 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3503 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3504 RTE_PTYPE_INNER_L4_FRAG,
3505 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3506 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3507 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3508 RTE_PTYPE_INNER_L4_NONFRAG,
3509 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3510 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3511 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3512 RTE_PTYPE_INNER_L4_UDP,
3513 /* [128] reserved */
3514 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3515 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3516 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3517 RTE_PTYPE_INNER_L4_TCP,
3518 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3519 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3520 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3521 RTE_PTYPE_INNER_L4_SCTP,
3522 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3523 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3524 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3525 RTE_PTYPE_INNER_L4_ICMP,
3527 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3528 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3529 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3530 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3531 RTE_PTYPE_INNER_L4_FRAG,
3532 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3533 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3534 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3535 RTE_PTYPE_INNER_L4_NONFRAG,
3536 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3537 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3538 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3539 RTE_PTYPE_INNER_L4_UDP,
3540 /* [135] reserved */
3541 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3542 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3543 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3544 RTE_PTYPE_INNER_L4_TCP,
3545 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3546 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3547 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3548 RTE_PTYPE_INNER_L4_SCTP,
3549 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3550 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3551 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3552 RTE_PTYPE_INNER_L4_ICMP,
3553 /* [139] - [299] reserved */
3556 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3557 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3559 /* PPPoE --> IPv4 */
3560 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3561 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3563 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3564 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3565 RTE_PTYPE_L4_NONFRAG,
3566 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3567 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3569 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3570 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3572 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3573 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3575 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3576 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3579 /* PPPoE --> IPv6 */
3580 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3581 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3583 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3584 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3585 RTE_PTYPE_L4_NONFRAG,
3586 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3587 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3589 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3590 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3592 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3593 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3595 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3596 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3598 /* [314] - [324] reserved */
3600 /* IPv4/IPv6 --> GTPC/GTPU */
3601 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3602 RTE_PTYPE_TUNNEL_GTPC,
3603 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3604 RTE_PTYPE_TUNNEL_GTPC,
3605 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3606 RTE_PTYPE_TUNNEL_GTPC,
3607 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3608 RTE_PTYPE_TUNNEL_GTPC,
3609 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3610 RTE_PTYPE_TUNNEL_GTPU,
3611 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3612 RTE_PTYPE_TUNNEL_GTPU,
3614 /* IPv4 --> GTPU --> IPv4 */
3615 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3616 RTE_PTYPE_TUNNEL_GTPU |
3617 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3618 RTE_PTYPE_INNER_L4_FRAG,
3619 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3620 RTE_PTYPE_TUNNEL_GTPU |
3621 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3622 RTE_PTYPE_INNER_L4_NONFRAG,
3623 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3624 RTE_PTYPE_TUNNEL_GTPU |
3625 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3626 RTE_PTYPE_INNER_L4_UDP,
3627 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3628 RTE_PTYPE_TUNNEL_GTPU |
3629 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3630 RTE_PTYPE_INNER_L4_TCP,
3631 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3632 RTE_PTYPE_TUNNEL_GTPU |
3633 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3634 RTE_PTYPE_INNER_L4_ICMP,
3636 /* IPv6 --> GTPU --> IPv4 */
3637 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3638 RTE_PTYPE_TUNNEL_GTPU |
3639 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3640 RTE_PTYPE_INNER_L4_FRAG,
3641 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3642 RTE_PTYPE_TUNNEL_GTPU |
3643 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3644 RTE_PTYPE_INNER_L4_NONFRAG,
3645 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3646 RTE_PTYPE_TUNNEL_GTPU |
3647 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3648 RTE_PTYPE_INNER_L4_UDP,
3649 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3650 RTE_PTYPE_TUNNEL_GTPU |
3651 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3652 RTE_PTYPE_INNER_L4_TCP,
3653 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3654 RTE_PTYPE_TUNNEL_GTPU |
3655 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3656 RTE_PTYPE_INNER_L4_ICMP,
3658 /* IPv4 --> GTPU --> IPv6 */
3659 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3660 RTE_PTYPE_TUNNEL_GTPU |
3661 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3662 RTE_PTYPE_INNER_L4_FRAG,
3663 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3664 RTE_PTYPE_TUNNEL_GTPU |
3665 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3666 RTE_PTYPE_INNER_L4_NONFRAG,
3667 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3668 RTE_PTYPE_TUNNEL_GTPU |
3669 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3670 RTE_PTYPE_INNER_L4_UDP,
3671 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3672 RTE_PTYPE_TUNNEL_GTPU |
3673 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3674 RTE_PTYPE_INNER_L4_TCP,
3675 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3676 RTE_PTYPE_TUNNEL_GTPU |
3677 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3678 RTE_PTYPE_INNER_L4_ICMP,
3680 /* IPv6 --> GTPU --> IPv6 */
3681 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3682 RTE_PTYPE_TUNNEL_GTPU |
3683 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3684 RTE_PTYPE_INNER_L4_FRAG,
3685 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3686 RTE_PTYPE_TUNNEL_GTPU |
3687 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3688 RTE_PTYPE_INNER_L4_NONFRAG,
3689 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3690 RTE_PTYPE_TUNNEL_GTPU |
3691 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3692 RTE_PTYPE_INNER_L4_UDP,
3693 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3694 RTE_PTYPE_TUNNEL_GTPU |
3695 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3696 RTE_PTYPE_INNER_L4_TCP,
3697 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_TUNNEL_GTPU |
3699 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3700 RTE_PTYPE_INNER_L4_ICMP,
3701 /* All others reserved */
3704 return type_table[ptype];
3707 void __attribute__((cold))
3708 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3710 struct ice_adapter *ad =
3711 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3714 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3715 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
3718 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
3719 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
3720 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3721 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3722 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3724 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
3725 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
3726 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3727 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
3728 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
3729 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
3732 * Check the programming status descriptor in the Rx queue.
3733 * This is done after a Flow Director filter has been programmed on
3737 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3739 volatile union ice_32byte_rx_desc *rxdp;
3746 rxdp = (volatile union ice_32byte_rx_desc *)
3747 (&rxq->rx_ring[rxq->rx_tail]);
3748 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
3749 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
3750 >> ICE_RXD_QW1_STATUS_S;
3752 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
3754 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
3755 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
3756 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
3757 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
3759 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
3760 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
3761 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
3762 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
3766 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
3767 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
3769 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
3773 rxdp->wb.qword1.status_error_len = 0;
3775 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
3777 if (rxq->rx_tail == 0)
3778 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3780 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
3786 #define ICE_FDIR_MAX_WAIT_US 10000
3789 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3791 struct ice_tx_queue *txq = pf->fdir.txq;
3792 struct ice_rx_queue *rxq = pf->fdir.rxq;
3793 volatile struct ice_fltr_desc *fdirdp;
3794 volatile struct ice_tx_desc *txdp;
3798 fdirdp = (volatile struct ice_fltr_desc *)
3799 (&txq->tx_ring[txq->tx_tail]);
3800 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3801 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3803 txdp = &txq->tx_ring[txq->tx_tail + 1];
3804 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3805 td_cmd = ICE_TX_DESC_CMD_EOP |
3806 ICE_TX_DESC_CMD_RS |
3807 ICE_TX_DESC_CMD_DUMMY;
3809 txdp->cmd_type_offset_bsz =
3810 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
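	/*
	 * FDIR programming consumes two Tx descriptors: the filter descriptor
	 * filled in above and a dummy data descriptor (ICE_TX_DESC_CMD_DUMMY)
	 * pointing at the template packet at pf->fdir.dma_addr.
	 */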
3813 if (txq->tx_tail >= txq->nb_tx_desc)
3815 /* Update the tx tail register */
3816 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
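	/* Poll for the hardware to write back DESC_DONE on the dummy
	 * descriptor, bounded by ICE_FDIR_MAX_WAIT_US iterations.
	 */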
3817 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3818 if ((txdp->cmd_type_offset_bsz &
3819 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3820 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3824 if (i >= ICE_FDIR_MAX_WAIT_US) {
3826 "Failed to program FDIR filter: time out to get DD on tx queue.");
3830 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
3833 ret = ice_check_fdir_programming_status(rxq);
3841 "Failed to program FDIR filter: programming status reported.");