1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
10 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
14 PKT_TX_OUTER_IP_CKSUM)
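/* Map a flexible-descriptor RXDID to the corresponding protocol
 * extraction type; unknown RXDIDs fall back to PROTO_XTR_NONE.
 */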
17 ice_rxdid_to_proto_xtr_type(uint8_t rxdid)
19 static uint8_t xtr_map[] = {
20 [ICE_RXDID_COMMS_AUX_VLAN] = PROTO_XTR_VLAN,
21 [ICE_RXDID_COMMS_AUX_IPV4] = PROTO_XTR_IPV4,
22 [ICE_RXDID_COMMS_AUX_IPV6] = PROTO_XTR_IPV6,
23 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = PROTO_XTR_IPV6_FLOW,
24 [ICE_RXDID_COMMS_AUX_TCP] = PROTO_XTR_TCP,
27 return rxdid < RTE_DIM(xtr_map) ? xtr_map[rxdid] : PROTO_XTR_NONE;
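/* Reverse mapping: select the RXDID for a requested protocol extraction
 * type; unknown types fall back to ICE_RXDID_COMMS_GENERIC.
 */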
31 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
33 static uint8_t rxdid_map[] = {
34 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_GENERIC,
35 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
36 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
37 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
38 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
39 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
42 return xtr_type < RTE_DIM(rxdid_map) ?
43 rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
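/* Program the RLAN Rx queue context for a queue: buffer sizes, maximum
 * packet length, the flexible descriptor format (QRXFLXP_CNTXT) and the
 * initial tail register value.
 */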
46 static enum ice_status
47 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
49 struct ice_vsi *vsi = rxq->vsi;
50 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
51 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
52 struct ice_rlan_ctx rx_ctx;
54 uint16_t buf_size, len;
55 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
56 uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
59 /* Set the buffer size, since header split is disabled. */
60 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
61 RTE_PKTMBUF_HEADROOM);
63 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
64 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
65 rxq->max_pkt_len = RTE_MIN(len,
66 dev->data->dev_conf.rxmode.max_rx_pkt_len);
68 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
69 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
70 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
71 PMD_DRV_LOG(ERR, "maximum packet length must "
72 "be larger than %u and smaller than %u,"
73 "as jumbo frame is enabled",
74 (uint32_t)RTE_ETHER_MAX_LEN,
75 (uint32_t)ICE_FRAME_SIZE_MAX);
79 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
80 rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
81 PMD_DRV_LOG(ERR, "maximum packet length must be "
82 "larger than %u and smaller than %u, "
83 "as jumbo frame is disabled",
84 (uint32_t)RTE_ETHER_MIN_LEN,
85 (uint32_t)RTE_ETHER_MAX_LEN);
90 memset(&rx_ctx, 0, sizeof(rx_ctx));
92 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
93 rx_ctx.qlen = rxq->nb_rx_desc;
94 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
95 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
96 rx_ctx.dtype = 0; /* No Header Split mode */
97 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
98 rx_ctx.dsize = 1; /* 32B descriptors */
100 rx_ctx.rxmax = rxq->max_pkt_len;
101 /* TPH: Transaction Layer Packet (TLP) processing hints */
102 rx_ctx.tphrdesc_ena = 1;
103 rx_ctx.tphwdesc_ena = 1;
104 rx_ctx.tphdata_ena = 1;
105 rx_ctx.tphhead_ena = 1;
106 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
107 * When the number of free descriptors goes below the lrxqthresh,
108 * an immediate interrupt is triggered.
110 rx_ctx.lrxqthresh = 2;
111 /* Default: use 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
114 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
116 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
118 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID: %u",
119 rxq->port_id, rxq->queue_id, rxdid);
121 /* Enable Flexible Descriptors in the queue context which
122 * allows this driver to select a specific receive descriptor format
124 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
125 QRXFLXP_CNTXT_RXDID_IDX_M;
127 /* Increase the context priority so the profile ID is picked up;
128 * the default is 0x01; setting it to 0x03 ensures the profile
129 * is programmed even if the previous context has the same priority.
131 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
132 QRXFLXP_CNTXT_RXDID_PRIO_M;
134 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
136 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
138 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
142 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
144 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
149 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
150 RTE_PKTMBUF_HEADROOM);
152 /* Check if scattered RX needs to be used. */
153 if (rxq->max_pkt_len > buf_size)
154 dev->data->scattered_rx = 1;
156 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
158 /* Init the Rx tail register */
159 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
164 /* Allocate mbufs for all descriptors in rx queue */
166 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
168 struct ice_rx_entry *rxe = rxq->sw_ring;
172 for (i = 0; i < rxq->nb_rx_desc; i++) {
173 volatile union ice_rx_flex_desc *rxd;
174 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
176 if (unlikely(!mbuf)) {
177 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
181 rte_mbuf_refcnt_set(mbuf, 1);
183 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
185 mbuf->port = rxq->port_id;
188 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
190 rxd = &rxq->rx_ring[i];
191 rxd->read.pkt_addr = dma_addr;
192 rxd->read.hdr_addr = 0;
193 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
203 /* Free all mbufs for descriptors in rx queue */
205 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
209 if (!rxq || !rxq->sw_ring) {
210 PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
214 for (i = 0; i < rxq->nb_rx_desc; i++) {
215 if (rxq->sw_ring[i].mbuf) {
216 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
217 rxq->sw_ring[i].mbuf = NULL;
220 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
221 if (rxq->rx_nb_avail == 0)
223 for (i = 0; i < rxq->rx_nb_avail; i++) {
224 struct rte_mbuf *mbuf;
226 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
227 rte_pktmbuf_free_seg(mbuf);
229 rxq->rx_nb_avail = 0;
230 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
234 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
236 rxq->rx_rel_mbufs(rxq);
239 /* Turn an Rx queue on or off.
240 * @q_idx: queue index in PF scope
241 * @on: true to enable the queue, false to disable it
244 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
249 /* QRX_CTRL = QRX_ENA */
250 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
253 if (reg & QRX_CTRL_QENA_STAT_M)
254 return 0; /* Already on, skip */
255 reg |= QRX_CTRL_QENA_REQ_M;
257 if (!(reg & QRX_CTRL_QENA_STAT_M))
258 return 0; /* Already off, skip */
259 reg &= ~QRX_CTRL_QENA_REQ_M;
262 /* Write the register */
263 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
264 /* Check the result. QENA_STAT is expected to follow
265 * QENA_REQ within no more than 10 us.
266 * TODO: need to change the wait counter later
268 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
269 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
270 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
272 if ((reg & QRX_CTRL_QENA_REQ_M) &&
273 (reg & QRX_CTRL_QENA_STAT_M))
276 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
277 !(reg & QRX_CTRL_QENA_STAT_M))
282 /* Check whether the wait timed out */
283 if (j >= ICE_CHK_Q_ENA_COUNT) {
284 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
285 (on ? "enable" : "disable"), q_idx);
293 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
294 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
296 ice_check_rx_burst_bulk_alloc_preconditions
297 (__rte_unused struct ice_rx_queue *rxq)
302 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
303 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
304 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
305 "rxq->rx_free_thresh=%d, "
306 "ICE_RX_MAX_BURST=%d",
307 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
309 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
310 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
311 "rxq->rx_free_thresh=%d, "
312 "rxq->nb_rx_desc=%d",
313 rxq->rx_free_thresh, rxq->nb_rx_desc);
315 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
316 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
317 "rxq->nb_rx_desc=%d, "
318 "rxq->rx_free_thresh=%d",
319 rxq->nb_rx_desc, rxq->rx_free_thresh);
329 /* Reset fields in ice_rx_queue back to their defaults */
331 ice_reset_rx_queue(struct ice_rx_queue *rxq)
337 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
341 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
342 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
343 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
345 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
346 len = rxq->nb_rx_desc;
348 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
349 ((volatile char *)rxq->rx_ring)[i] = 0;
351 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
352 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
353 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
354 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
356 rxq->rx_nb_avail = 0;
357 rxq->rx_next_avail = 0;
358 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
359 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
363 rxq->pkt_first_seg = NULL;
364 rxq->pkt_last_seg = NULL;
366 rxq->rxrearm_start = 0;
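/* Start an Rx queue: program the hardware context, fill the ring with
 * mbufs, initialize the tail register and enable the queue.
 */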
371 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
373 struct ice_rx_queue *rxq;
375 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
377 PMD_INIT_FUNC_TRACE();
379 if (rx_queue_id >= dev->data->nb_rx_queues) {
380 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
381 rx_queue_id, dev->data->nb_rx_queues);
385 rxq = dev->data->rx_queues[rx_queue_id];
386 if (!rxq || !rxq->q_set) {
387 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
392 err = ice_program_hw_rx_queue(rxq);
394 PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
399 err = ice_alloc_rx_queue_mbufs(rxq);
401 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
405 /* Init the RX tail register. */
406 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
408 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
410 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
413 ice_rx_queue_release_mbufs(rxq);
414 ice_reset_rx_queue(rxq);
418 dev->data->rx_queue_state[rx_queue_id] =
419 RTE_ETH_QUEUE_STATE_STARTED;
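/* Stop an Rx queue: disable it in hardware, release all posted mbufs
 * and reset the software queue state.
 */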
425 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
427 struct ice_rx_queue *rxq;
429 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
431 if (rx_queue_id < dev->data->nb_rx_queues) {
432 rxq = dev->data->rx_queues[rx_queue_id];
434 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
436 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
440 ice_rx_queue_release_mbufs(rxq);
441 ice_reset_rx_queue(rxq);
442 dev->data->rx_queue_state[rx_queue_id] =
443 RTE_ETH_QUEUE_STATE_STOPPED;
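/* Start a Tx queue: build the TLAN context, initialize the doorbell
 * register and register the queue with the scheduler via
 * ice_ena_vsi_txq(), storing the returned node TEID.
 */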
450 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
452 struct ice_tx_queue *txq;
456 struct ice_aqc_add_tx_qgrp txq_elem;
457 struct ice_tlan_ctx tx_ctx;
459 PMD_INIT_FUNC_TRACE();
461 if (tx_queue_id >= dev->data->nb_tx_queues) {
462 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
463 tx_queue_id, dev->data->nb_tx_queues);
467 txq = dev->data->tx_queues[tx_queue_id];
468 if (!txq || !txq->q_set) {
469 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
475 hw = ICE_VSI_TO_HW(vsi);
477 memset(&txq_elem, 0, sizeof(txq_elem));
478 memset(&tx_ctx, 0, sizeof(tx_ctx));
479 txq_elem.num_txqs = 1;
480 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
482 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
483 tx_ctx.qlen = txq->nb_tx_desc;
484 tx_ctx.pf_num = hw->pf_id;
485 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
486 tx_ctx.src_vsi = vsi->vsi_id;
487 tx_ctx.port_num = hw->port_info->lport;
488 tx_ctx.tso_ena = 1; /* tso enable */
489 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
490 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
492 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
495 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
497 /* Init the Tx tail register */
498 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
500 /* FIXME: we assume TC is always 0 here */
501 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
502 &txq_elem, sizeof(txq_elem), NULL);
504 PMD_DRV_LOG(ERR, "Failed to add lan txq");
507 /* store the schedule node id */
508 txq->q_teid = txq_elem.txqs[0].q_teid;
510 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
514 static enum ice_status
515 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
517 struct ice_vsi *vsi = rxq->vsi;
518 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
519 uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
520 struct ice_rlan_ctx rx_ctx;
525 rxq->rx_buf_len = 1024;
527 memset(&rx_ctx, 0, sizeof(rx_ctx));
529 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
530 rx_ctx.qlen = rxq->nb_rx_desc;
531 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
532 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
533 rx_ctx.dtype = 0; /* No Header Split mode */
534 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
535 rx_ctx.dsize = 1; /* 32B descriptors */
537 rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
538 /* TPH: Transaction Layer Packet (TLP) processing hints */
539 rx_ctx.tphrdesc_ena = 1;
540 rx_ctx.tphwdesc_ena = 1;
541 rx_ctx.tphdata_ena = 1;
542 rx_ctx.tphhead_ena = 1;
543 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
544 * When the number of free descriptors goes below the lrxqthresh,
545 * an immediate interrupt is triggered.
547 rx_ctx.lrxqthresh = 2;
548 /* Default: use 32-byte descriptors; the VLAN tag is extracted to L2TAG2 (1st) */
551 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
553 /* Enable Flexible Descriptors in the queue context which
554 * allows this driver to select a specific receive descriptor format
556 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
557 QRXFLXP_CNTXT_RXDID_IDX_M;
559 /* Increase the context priority so the profile ID is picked up;
560 * the default is 0x01; setting it to 0x03 ensures the profile
561 * is programmed even if the previous context has the same priority.
563 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
564 QRXFLXP_CNTXT_RXDID_PRIO_M;
566 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
568 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
570 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
574 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
576 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
581 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
583 /* Init the Rx tail register */
584 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
590 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
592 struct ice_rx_queue *rxq;
594 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
595 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
597 PMD_INIT_FUNC_TRACE();
600 if (!rxq || !rxq->q_set) {
601 PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available or not set up",
606 err = ice_fdir_program_hw_rx_queue(rxq);
608 PMD_DRV_LOG(ERR, "Failed to program FDIR RX queue %u",
613 /* Init the RX tail register. */
614 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
616 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
618 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
621 ice_reset_rx_queue(rxq);
629 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
631 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
632 struct ice_tx_queue *txq;
636 struct ice_aqc_add_tx_qgrp txq_elem;
637 struct ice_tlan_ctx tx_ctx;
639 PMD_INIT_FUNC_TRACE();
642 if (!txq || !txq->q_set) {
643 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up",
649 hw = ICE_VSI_TO_HW(vsi);
651 memset(&txq_elem, 0, sizeof(txq_elem));
652 memset(&tx_ctx, 0, sizeof(tx_ctx));
653 txq_elem.num_txqs = 1;
654 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
656 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
657 tx_ctx.qlen = txq->nb_tx_desc;
658 tx_ctx.pf_num = hw->pf_id;
659 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
660 tx_ctx.src_vsi = vsi->vsi_id;
661 tx_ctx.port_num = hw->port_info->lport;
662 tx_ctx.tso_ena = 1; /* tso enable */
663 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
664 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
666 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
669 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
671 /* Init the Tx tail register */
672 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
674 /* FIXME: we assume TC is always 0 here */
675 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
676 &txq_elem, sizeof(txq_elem), NULL);
678 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
681 /* store the schedule node id */
682 txq->q_teid = txq_elem.txqs[0].q_teid;
687 /* Free all mbufs for descriptors in tx queue */
689 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
693 if (!txq || !txq->sw_ring) {
694 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
698 for (i = 0; i < txq->nb_tx_desc; i++) {
699 if (txq->sw_ring[i].mbuf) {
700 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
701 txq->sw_ring[i].mbuf = NULL;
706 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
708 txq->tx_rel_mbufs(txq);
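/* Reset a Tx queue: zero the descriptor ring, mark all descriptors as
 * done, relink the software ring entries and reset the cleanup/free
 * bookkeeping counters.
 */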
712 ice_reset_tx_queue(struct ice_tx_queue *txq)
714 struct ice_tx_entry *txe;
715 uint16_t i, prev, size;
718 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
723 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
724 for (i = 0; i < size; i++)
725 ((volatile char *)txq->tx_ring)[i] = 0;
727 prev = (uint16_t)(txq->nb_tx_desc - 1);
728 for (i = 0; i < txq->nb_tx_desc; i++) {
729 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
731 txd->cmd_type_offset_bsz =
732 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
735 txe[prev].next_id = i;
739 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
740 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
745 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
746 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
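/* Stop a Tx queue: remove it from the scheduler via ice_dis_vsi_txq(),
 * release all mbufs and reset the ring state.
 */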
750 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
752 struct ice_tx_queue *txq;
753 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
754 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
755 struct ice_vsi *vsi = pf->main_vsi;
756 enum ice_status status;
759 uint16_t q_handle = tx_queue_id;
761 if (tx_queue_id >= dev->data->nb_tx_queues) {
762 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
763 tx_queue_id, dev->data->nb_tx_queues);
767 txq = dev->data->tx_queues[tx_queue_id];
769 PMD_DRV_LOG(ERR, "TX queue %u is not available",
774 q_ids[0] = txq->reg_idx;
775 q_teids[0] = txq->q_teid;
777 /* FIXME: we assume TC is always 0 here */
778 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
779 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
780 if (status != ICE_SUCCESS) {
781 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
785 ice_tx_queue_release_mbufs(txq);
786 ice_reset_tx_queue(txq);
787 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
793 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
795 struct ice_rx_queue *rxq;
797 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
798 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
802 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
804 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
808 ice_rx_queue_release_mbufs(rxq);
814 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
816 struct ice_tx_queue *txq;
817 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
818 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
819 struct ice_vsi *vsi = pf->main_vsi;
820 enum ice_status status;
823 uint16_t q_handle = tx_queue_id;
827 PMD_DRV_LOG(ERR, "TX queue %u is not available",
833 q_ids[0] = txq->reg_idx;
834 q_teids[0] = txq->q_teid;
836 /* FIXME: we assume TC is always 0 here */
837 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
838 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
839 if (status != ICE_SUCCESS) {
840 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
844 ice_tx_queue_release_mbufs(txq);
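/* Set up an Rx queue: validate the descriptor count, allocate the queue
 * structure, reserve the DMA zone for the descriptor ring, allocate the
 * software ring and check the bulk-allocation preconditions.
 */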
850 ice_rx_queue_setup(struct rte_eth_dev *dev,
853 unsigned int socket_id,
854 const struct rte_eth_rxconf *rx_conf,
855 struct rte_mempool *mp)
857 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
858 struct ice_adapter *ad =
859 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
860 struct ice_vsi *vsi = pf->main_vsi;
861 struct ice_rx_queue *rxq;
862 const struct rte_memzone *rz;
865 int use_def_burst_func = 1;
867 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
868 nb_desc > ICE_MAX_RING_DESC ||
869 nb_desc < ICE_MIN_RING_DESC) {
870 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
875 /* Free memory if needed */
876 if (dev->data->rx_queues[queue_idx]) {
877 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
878 dev->data->rx_queues[queue_idx] = NULL;
881 /* Allocate the rx queue data structure */
882 rxq = rte_zmalloc_socket(NULL,
883 sizeof(struct ice_rx_queue),
887 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
888 "rx queue data structure");
892 rxq->nb_rx_desc = nb_desc;
893 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
894 rxq->queue_id = queue_idx;
896 rxq->reg_idx = vsi->base_queue + queue_idx;
897 rxq->port_id = dev->data->port_id;
898 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
899 rxq->crc_len = RTE_ETHER_CRC_LEN;
903 rxq->drop_en = rx_conf->rx_drop_en;
905 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
906 rxq->proto_xtr = pf->proto_xtr != NULL ?
907 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
909 /* Allocate the maximum number of RX ring hardware descriptors. */
910 len = ICE_MAX_RING_DESC;
912 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
914 * Allocating a little more memory because vectorized/bulk_alloc Rx
915 * functions don't check boundaries each time.
917 len += ICE_RX_MAX_BURST;
920 /* Reserve DMA memory for the maximum number of RX ring descriptors. */
921 ring_size = sizeof(union ice_rx_flex_desc) * len;
922 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
923 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
924 ring_size, ICE_RING_BASE_ALIGN,
927 ice_rx_queue_release(rxq);
928 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
932 /* Zero all the descriptors in the ring. */
933 memset(rz->addr, 0, ring_size);
935 rxq->rx_ring_dma = rz->iova;
936 rxq->rx_ring = rz->addr;
938 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
939 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
944 /* Allocate the software ring. */
945 rxq->sw_ring = rte_zmalloc_socket(NULL,
946 sizeof(struct ice_rx_entry) * len,
950 ice_rx_queue_release(rxq);
951 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
955 ice_reset_rx_queue(rxq);
957 dev->data->rx_queues[queue_idx] = rxq;
958 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
960 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
962 if (!use_def_burst_func) {
963 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
964 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
965 "satisfied. Rx Burst Bulk Alloc function will be "
966 "used on port=%d, queue=%d.",
967 rxq->port_id, rxq->queue_id);
968 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
970 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
971 "not satisfied, Scattered Rx is requested, "
972 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
973 "not enabled on port=%d, queue=%d.",
974 rxq->port_id, rxq->queue_id);
975 ad->rx_bulk_alloc_allowed = false;
982 ice_rx_queue_release(void *rxq)
984 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
987 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
991 ice_rx_queue_release_mbufs(q);
992 rte_free(q->sw_ring);
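/* Set up a Tx queue: validate the descriptor count and the RS/free
 * thresholds, allocate the queue structure, descriptor ring and software
 * ring, then select the Tx function flags for this queue.
 */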
997 ice_tx_queue_setup(struct rte_eth_dev *dev,
1000 unsigned int socket_id,
1001 const struct rte_eth_txconf *tx_conf)
1003 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1004 struct ice_vsi *vsi = pf->main_vsi;
1005 struct ice_tx_queue *txq;
1006 const struct rte_memzone *tz;
1008 uint16_t tx_rs_thresh, tx_free_thresh;
1011 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1013 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1014 nb_desc > ICE_MAX_RING_DESC ||
1015 nb_desc < ICE_MIN_RING_DESC) {
1016 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1017 "invalid", nb_desc);
1022 * The following two parameters control the setting of the RS bit on
1023 * transmit descriptors. TX descriptors will have their RS bit set
1024 * after txq->tx_rs_thresh descriptors have been used. The TX
1025 * descriptor ring will be cleaned after txq->tx_free_thresh
1026 * descriptors are used or if the number of descriptors required to
1027 * transmit a packet is greater than the number of free TX descriptors.
1029 * The following constraints must be satisfied:
1030 * - tx_rs_thresh must be greater than 0.
1031 * - tx_rs_thresh must be less than the size of the ring minus 2.
1032 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1033 * - tx_rs_thresh must be a divisor of the ring size.
1034 * - tx_free_thresh must be greater than 0.
1035 * - tx_free_thresh must be less than the size of the ring minus 3.
1036 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1038 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1039 * race condition, hence the maximum threshold constraints. When set
1040 * to zero, the default values are used.
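* For example, with nb_desc = 1024 the pair tx_rs_thresh = 32 and
* tx_free_thresh = 64 satisfies every constraint above: 32 divides 1024,
* 32 <= 64, and 32 + 64 <= 1024.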
1042 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1043 tx_conf->tx_free_thresh :
1044 ICE_DEFAULT_TX_FREE_THRESH);
1045 /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1047 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1048 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1049 if (tx_conf->tx_rs_thresh)
1050 tx_rs_thresh = tx_conf->tx_rs_thresh;
1051 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1052 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1053 "exceed nb_desc. (tx_rs_thresh=%u "
1054 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1055 (unsigned int)tx_rs_thresh,
1056 (unsigned int)tx_free_thresh,
1057 (unsigned int)nb_desc,
1058 (int)dev->data->port_id,
1062 if (tx_rs_thresh >= (nb_desc - 2)) {
1063 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1064 "number of TX descriptors minus 2. "
1065 "(tx_rs_thresh=%u port=%d queue=%d)",
1066 (unsigned int)tx_rs_thresh,
1067 (int)dev->data->port_id,
1071 if (tx_free_thresh >= (nb_desc - 3)) {
1072 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1074 "number of TX descriptors minus 3. "
1075 "(tx_free_thresh=%u port=%d queue=%d)",
1076 (unsigned int)tx_free_thresh,
1077 (int)dev->data->port_id,
1081 if (tx_rs_thresh > tx_free_thresh) {
1082 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1083 "equal to tx_free_thresh. (tx_free_thresh=%u"
1084 " tx_rs_thresh=%u port=%d queue=%d)",
1085 (unsigned int)tx_free_thresh,
1086 (unsigned int)tx_rs_thresh,
1087 (int)dev->data->port_id,
1091 if ((nb_desc % tx_rs_thresh) != 0) {
1092 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1093 "number of TX descriptors. (tx_rs_thresh=%u"
1094 " port=%d queue=%d)",
1095 (unsigned int)tx_rs_thresh,
1096 (int)dev->data->port_id,
1100 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1101 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1102 "tx_rs_thresh is greater than 1. "
1103 "(tx_rs_thresh=%u port=%d queue=%d)",
1104 (unsigned int)tx_rs_thresh,
1105 (int)dev->data->port_id,
1110 /* Free memory if needed. */
1111 if (dev->data->tx_queues[queue_idx]) {
1112 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1113 dev->data->tx_queues[queue_idx] = NULL;
1116 /* Allocate the TX queue data structure. */
1117 txq = rte_zmalloc_socket(NULL,
1118 sizeof(struct ice_tx_queue),
1119 RTE_CACHE_LINE_SIZE,
1122 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1123 "tx queue structure");
1127 /* Allocate TX hardware ring descriptors. */
1128 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1129 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1130 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1131 ring_size, ICE_RING_BASE_ALIGN,
1134 ice_tx_queue_release(txq);
1135 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1139 txq->nb_tx_desc = nb_desc;
1140 txq->tx_rs_thresh = tx_rs_thresh;
1141 txq->tx_free_thresh = tx_free_thresh;
1142 txq->pthresh = tx_conf->tx_thresh.pthresh;
1143 txq->hthresh = tx_conf->tx_thresh.hthresh;
1144 txq->wthresh = tx_conf->tx_thresh.wthresh;
1145 txq->queue_id = queue_idx;
1147 txq->reg_idx = vsi->base_queue + queue_idx;
1148 txq->port_id = dev->data->port_id;
1149 txq->offloads = offloads;
1151 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1153 txq->tx_ring_dma = tz->iova;
1154 txq->tx_ring = tz->addr;
1156 /* Allocate software ring */
1158 rte_zmalloc_socket(NULL,
1159 sizeof(struct ice_tx_entry) * nb_desc,
1160 RTE_CACHE_LINE_SIZE,
1162 if (!txq->sw_ring) {
1163 ice_tx_queue_release(txq);
1164 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1168 ice_reset_tx_queue(txq);
1170 dev->data->tx_queues[queue_idx] = txq;
1171 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1172 ice_set_tx_function_flag(dev, txq);
1178 ice_tx_queue_release(void *txq)
1180 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1183 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1187 ice_tx_queue_release_mbufs(q);
1188 rte_free(q->sw_ring);
1193 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1194 struct rte_eth_rxq_info *qinfo)
1196 struct ice_rx_queue *rxq;
1198 rxq = dev->data->rx_queues[queue_id];
1200 qinfo->mp = rxq->mp;
1201 qinfo->scattered_rx = dev->data->scattered_rx;
1202 qinfo->nb_desc = rxq->nb_rx_desc;
1204 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1205 qinfo->conf.rx_drop_en = rxq->drop_en;
1206 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1210 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1211 struct rte_eth_txq_info *qinfo)
1213 struct ice_tx_queue *txq;
1215 txq = dev->data->tx_queues[queue_id];
1217 qinfo->nb_desc = txq->nb_tx_desc;
1219 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1220 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1221 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1223 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1224 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1225 qinfo->conf.offloads = txq->offloads;
1226 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1230 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1232 #define ICE_RXQ_SCAN_INTERVAL 4
1233 volatile union ice_rx_flex_desc *rxdp;
1234 struct ice_rx_queue *rxq;
1237 rxq = dev->data->rx_queues[rx_queue_id];
1238 rxdp = &rxq->rx_ring[rxq->rx_tail];
1239 while ((desc < rxq->nb_rx_desc) &&
1240 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1241 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1243 * Check the DD bit of one rx descriptor in every group of 4,
1244 * to avoid checking too frequently and degrading performance
1247 desc += ICE_RXQ_SCAN_INTERVAL;
1248 rxdp += ICE_RXQ_SCAN_INTERVAL;
1249 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1250 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1251 desc - rxq->nb_rx_desc]);
1257 #define ICE_RX_FLEX_ERR0_BITS \
1258 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1259 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1260 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1261 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1262 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1263 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1265 /* Rx L3/L4 checksum */
1266 static inline uint64_t
1267 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1271 /* check if HW has decoded the packet and checksum */
1272 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1275 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1276 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1280 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1281 flags |= PKT_RX_IP_CKSUM_BAD;
1283 flags |= PKT_RX_IP_CKSUM_GOOD;
1285 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1286 flags |= PKT_RX_L4_CKSUM_BAD;
1288 flags |= PKT_RX_L4_CKSUM_GOOD;
1290 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1291 flags |= PKT_RX_EIP_CKSUM_BAD;
1297 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1299 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1300 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1301 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1303 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1304 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1305 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1310 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1311 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1312 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1313 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1314 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1315 mb->vlan_tci_outer = mb->vlan_tci;
1316 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1317 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1318 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1319 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1321 mb->vlan_tci_outer = 0;
1324 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1325 mb->vlan_tci, mb->vlan_tci_outer);
1328 #define ICE_RX_PROTO_XTR_VALID \
1329 ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
1330 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
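/* Copy per-packet fields from the flexible descriptor into the mbuf:
 * RSS hash, protocol extraction metadata (aux0/aux1) and FDIR flow ID.
 */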
1333 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
1334 volatile union ice_rx_flex_desc *rxdp)
1336 volatile struct ice_32b_rx_flex_desc_comms *desc =
1337 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
1340 stat_err = rte_le_to_cpu_16(desc->status_error0);
1341 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
1342 mb->ol_flags |= PKT_RX_RSS_HASH;
1343 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
1346 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1347 init_proto_xtr_flds(mb);
1349 stat_err = rte_le_to_cpu_16(desc->status_error1);
1350 if (stat_err & ICE_RX_PROTO_XTR_VALID) {
1351 struct proto_xtr_flds *xtr = get_proto_xtr_flds(mb);
1353 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
1355 rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
1357 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
1359 rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
1361 xtr->type = ice_rxdid_to_proto_xtr_type(desc->rxdid);
1362 xtr->magic = PROTO_XTR_MAGIC_ID;
1365 if (desc->flow_id != 0xFFFFFFFF) {
1366 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1367 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
1372 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1373 #define ICE_LOOK_AHEAD 8
1374 #if (ICE_LOOK_AHEAD != 8)
1375 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1378 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1380 volatile union ice_rx_flex_desc *rxdp;
1381 struct ice_rx_entry *rxep;
1382 struct rte_mbuf *mb;
1385 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1386 int32_t i, j, nb_rx = 0;
1387 uint64_t pkt_flags = 0;
1388 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1390 rxdp = &rxq->rx_ring[rxq->rx_tail];
1391 rxep = &rxq->sw_ring[rxq->rx_tail];
1393 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1395 /* Make sure there is at least 1 packet to receive */
1396 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1400 * Scan LOOK_AHEAD descriptors at a time to determine which
1401 * descriptors reference packets that are ready to be received.
1403 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1404 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1405 /* Read desc statuses backwards to avoid race condition */
1406 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1407 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1411 /* Compute how many status bits were set */
1412 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1413 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1417 /* Translate descriptor info to mbuf parameters */
1418 for (j = 0; j < nb_dd; j++) {
1420 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1421 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1422 mb->data_len = pkt_len;
1423 mb->pkt_len = pkt_len;
1425 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1426 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1427 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1428 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1429 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1430 ice_rxd_to_pkt_fields(mb, &rxdp[j]);
1432 mb->ol_flags |= pkt_flags;
1435 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1436 rxq->rx_stage[i + j] = rxep[j].mbuf;
1438 if (nb_dd != ICE_LOOK_AHEAD)
1442 /* Clear software ring entries */
1443 for (i = 0; i < nb_rx; i++)
1444 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1446 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1447 "port_id=%u, queue_id=%u, nb_rx=%d",
1448 rxq->port_id, rxq->queue_id, nb_rx);
1453 static inline uint16_t
1454 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1455 struct rte_mbuf **rx_pkts,
1459 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1461 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1463 for (i = 0; i < nb_pkts; i++)
1464 rx_pkts[i] = stage[i];
1466 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1467 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
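/* Bulk-allocate rx_free_thresh mbufs from the mempool, program their DMA
 * addresses into the descriptors and advance the tail register and the
 * free trigger.
 */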
1473 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1475 volatile union ice_rx_flex_desc *rxdp;
1476 struct ice_rx_entry *rxep;
1477 struct rte_mbuf *mb;
1478 uint16_t alloc_idx, i;
1482 /* Allocate buffers in bulk */
1483 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1484 (rxq->rx_free_thresh - 1));
1485 rxep = &rxq->sw_ring[alloc_idx];
1486 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1487 rxq->rx_free_thresh);
1488 if (unlikely(diag != 0)) {
1489 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1493 rxdp = &rxq->rx_ring[alloc_idx];
1494 for (i = 0; i < rxq->rx_free_thresh; i++) {
1495 if (likely(i < (rxq->rx_free_thresh - 1)))
1496 /* Prefetch next mbuf */
1497 rte_prefetch0(rxep[i + 1].mbuf);
1500 rte_mbuf_refcnt_set(mb, 1);
1502 mb->data_off = RTE_PKTMBUF_HEADROOM;
1504 mb->port = rxq->port_id;
1505 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1506 rxdp[i].read.hdr_addr = 0;
1507 rxdp[i].read.pkt_addr = dma_addr;
1510 /* Update the Rx tail register */
1511 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1513 rxq->rx_free_trigger =
1514 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1515 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1516 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1521 static inline uint16_t
1522 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1524 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1526 struct rte_eth_dev *dev;
1531 if (rxq->rx_nb_avail)
1532 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1534 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1535 rxq->rx_next_avail = 0;
1536 rxq->rx_nb_avail = nb_rx;
1537 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1539 if (rxq->rx_tail > rxq->rx_free_trigger) {
1540 if (ice_rx_alloc_bufs(rxq) != 0) {
1543 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1544 dev->data->rx_mbuf_alloc_failed +=
1545 rxq->rx_free_thresh;
1546 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1547 "port_id=%u, queue_id=%u",
1548 rxq->port_id, rxq->queue_id);
1549 rxq->rx_nb_avail = 0;
1550 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1551 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1552 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1558 if (rxq->rx_tail >= rxq->nb_rx_desc)
1561 if (rxq->rx_nb_avail)
1562 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1568 ice_recv_pkts_bulk_alloc(void *rx_queue,
1569 struct rte_mbuf **rx_pkts,
1576 if (unlikely(nb_pkts == 0))
1579 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1580 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1583 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1584 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1585 nb_rx = (uint16_t)(nb_rx + count);
1586 nb_pkts = (uint16_t)(nb_pkts - count);
1595 ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
1596 struct rte_mbuf __rte_unused **rx_pkts,
1597 uint16_t __rte_unused nb_pkts)
1601 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
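/* Scattered receive path: chain multi-buffer packets through
 * pkt_first_seg/pkt_last_seg and, when hardware does not strip the CRC,
 * trim it across the trailing segments.
 */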
1604 ice_recv_scattered_pkts(void *rx_queue,
1605 struct rte_mbuf **rx_pkts,
1608 struct ice_rx_queue *rxq = rx_queue;
1609 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1610 volatile union ice_rx_flex_desc *rxdp;
1611 union ice_rx_flex_desc rxd;
1612 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1613 struct ice_rx_entry *rxe;
1614 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1615 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1616 struct rte_mbuf *nmb; /* new allocated mbuf */
1617 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1618 uint16_t rx_id = rxq->rx_tail;
1620 uint16_t nb_hold = 0;
1621 uint16_t rx_packet_len;
1622 uint16_t rx_stat_err0;
1625 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1626 struct rte_eth_dev *dev;
1628 while (nb_rx < nb_pkts) {
1629 rxdp = &rx_ring[rx_id];
1630 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1632 /* Check the DD bit first */
1633 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1637 nmb = rte_mbuf_raw_alloc(rxq->mp);
1638 if (unlikely(!nmb)) {
1639 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1640 dev->data->rx_mbuf_alloc_failed++;
1643 rxd = *rxdp; /* copy descriptor in ring to temp variable */
1646 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1648 if (unlikely(rx_id == rxq->nb_rx_desc))
1651 /* Prefetch next mbuf */
1652 rte_prefetch0(sw_ring[rx_id].mbuf);
1655 * When next RX descriptor is on a cache line boundary,
1656 * prefetch the next 4 RX descriptors and next 8 pointers
1659 if ((rx_id & 0x3) == 0) {
1660 rte_prefetch0(&rx_ring[rx_id]);
1661 rte_prefetch0(&sw_ring[rx_id]);
1667 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1669 /* Set data buffer address and data length of the mbuf */
1670 rxdp->read.hdr_addr = 0;
1671 rxdp->read.pkt_addr = dma_addr;
1672 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1673 ICE_RX_FLX_DESC_PKT_LEN_M;
1674 rxm->data_len = rx_packet_len;
1675 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1678 * If this is the first buffer of the received packet, set the
1679 * pointer to the first mbuf of the packet and initialize its
1680 * context. Otherwise, update the total length and the number
1681 * of segments of the current scattered packet, and update the
1682 * pointer to the last mbuf of the current packet.
1686 first_seg->nb_segs = 1;
1687 first_seg->pkt_len = rx_packet_len;
1689 first_seg->pkt_len =
1690 (uint16_t)(first_seg->pkt_len +
1692 first_seg->nb_segs++;
1693 last_seg->next = rxm;
1697 * If this is not the last buffer of the received packet,
1698 * update the pointer to the last mbuf of the current scattered
1699 * packet and continue to parse the RX ring.
1701 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1707 * This is the last buffer of the received packet. If the CRC
1708 * is not stripped by the hardware:
1709 * - Subtract the CRC length from the total packet length.
1710 * - If the last buffer only contains the whole CRC or a part
1711 * of it, free the mbuf associated to the last buffer. If part
1712 * of the CRC is also contained in the previous mbuf, subtract
1713 * the length of that CRC part from the data length of the
1717 if (unlikely(rxq->crc_len > 0)) {
1718 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1719 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1720 rte_pktmbuf_free_seg(rxm);
1721 first_seg->nb_segs--;
1722 last_seg->data_len =
1723 (uint16_t)(last_seg->data_len -
1724 (RTE_ETHER_CRC_LEN - rx_packet_len));
1725 last_seg->next = NULL;
1727 rxm->data_len = (uint16_t)(rx_packet_len -
1731 first_seg->port = rxq->port_id;
1732 first_seg->ol_flags = 0;
1733 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1734 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1735 ice_rxd_to_vlan_tci(first_seg, &rxd);
1736 ice_rxd_to_pkt_fields(first_seg, &rxd);
1737 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1738 first_seg->ol_flags |= pkt_flags;
1739 /* Prefetch data of first segment, if configured to do so. */
1740 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1741 first_seg->data_off));
1742 rx_pkts[nb_rx++] = first_seg;
1746 /* Record index of the next RX descriptor to probe. */
1747 rxq->rx_tail = rx_id;
1748 rxq->pkt_first_seg = first_seg;
1749 rxq->pkt_last_seg = last_seg;
1752 * If the number of free RX descriptors is greater than the RX free
1753 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1754 * register. Update the RDT with the value of the last processed RX
1755 * descriptor minus 1, to guarantee that the RDT register is never
1756 * equal to the RDH register, which creates a "full" ring situation
1757 * from the hardware point of view.
1759 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1760 if (nb_hold > rxq->rx_free_thresh) {
1761 rx_id = (uint16_t)(rx_id == 0 ?
1762 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1763 /* write TAIL register */
1764 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1767 rxq->nb_rx_hold = nb_hold;
1769 /* return the number of received packets in the burst */
1774 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1776 struct ice_adapter *ad =
1777 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1778 const uint32_t *ptypes;
1780 static const uint32_t ptypes_os[] = {
1781 /* refers to ice_get_default_pkt_type() */
1783 RTE_PTYPE_L2_ETHER_TIMESYNC,
1784 RTE_PTYPE_L2_ETHER_LLDP,
1785 RTE_PTYPE_L2_ETHER_ARP,
1786 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1787 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1790 RTE_PTYPE_L4_NONFRAG,
1794 RTE_PTYPE_TUNNEL_GRENAT,
1795 RTE_PTYPE_TUNNEL_IP,
1796 RTE_PTYPE_INNER_L2_ETHER,
1797 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1798 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1799 RTE_PTYPE_INNER_L4_FRAG,
1800 RTE_PTYPE_INNER_L4_ICMP,
1801 RTE_PTYPE_INNER_L4_NONFRAG,
1802 RTE_PTYPE_INNER_L4_SCTP,
1803 RTE_PTYPE_INNER_L4_TCP,
1804 RTE_PTYPE_INNER_L4_UDP,
1808 static const uint32_t ptypes_comms[] = {
1809 /* refers to ice_get_default_pkt_type() */
1811 RTE_PTYPE_L2_ETHER_TIMESYNC,
1812 RTE_PTYPE_L2_ETHER_LLDP,
1813 RTE_PTYPE_L2_ETHER_ARP,
1814 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1815 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1818 RTE_PTYPE_L4_NONFRAG,
1822 RTE_PTYPE_TUNNEL_GRENAT,
1823 RTE_PTYPE_TUNNEL_IP,
1824 RTE_PTYPE_INNER_L2_ETHER,
1825 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1826 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1827 RTE_PTYPE_INNER_L4_FRAG,
1828 RTE_PTYPE_INNER_L4_ICMP,
1829 RTE_PTYPE_INNER_L4_NONFRAG,
1830 RTE_PTYPE_INNER_L4_SCTP,
1831 RTE_PTYPE_INNER_L4_TCP,
1832 RTE_PTYPE_INNER_L4_UDP,
1833 RTE_PTYPE_TUNNEL_GTPC,
1834 RTE_PTYPE_TUNNEL_GTPU,
1835 RTE_PTYPE_L2_ETHER_PPPOE,
1839 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1840 ptypes = ptypes_comms;
1844 if (dev->rx_pkt_burst == ice_recv_pkts ||
1845 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
1846 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1848 dev->rx_pkt_burst == ice_recv_scattered_pkts)
1852 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1853 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1854 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1855 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1863 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1865 volatile union ice_rx_flex_desc *rxdp;
1866 struct ice_rx_queue *rxq = rx_queue;
1869 if (unlikely(offset >= rxq->nb_rx_desc))
1872 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1873 return RTE_ETH_RX_DESC_UNAVAIL;
1875 desc = rxq->rx_tail + offset;
1876 if (desc >= rxq->nb_rx_desc)
1877 desc -= rxq->nb_rx_desc;
1879 rxdp = &rxq->rx_ring[desc];
1880 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1881 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1882 return RTE_ETH_RX_DESC_DONE;
1884 return RTE_ETH_RX_DESC_AVAIL;
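/* Report the state of a Tx descriptor by checking the DTYPE field of the
 * nearest descriptor that carries the RS bit.
 */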
1888 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1890 struct ice_tx_queue *txq = tx_queue;
1891 volatile uint64_t *status;
1892 uint64_t mask, expect;
1895 if (unlikely(offset >= txq->nb_tx_desc))
1898 desc = txq->tx_tail + offset;
1899 /* go to next desc that has the RS bit */
1900 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1902 if (desc >= txq->nb_tx_desc) {
1903 desc -= txq->nb_tx_desc;
1904 if (desc >= txq->nb_tx_desc)
1905 desc -= txq->nb_tx_desc;
1908 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1909 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1910 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1911 ICE_TXD_QW1_DTYPE_S);
1912 if ((*status & mask) == expect)
1913 return RTE_ETH_TX_DESC_DONE;
1915 return RTE_ETH_TX_DESC_FULL;
1919 ice_clear_queues(struct rte_eth_dev *dev)
1923 PMD_INIT_FUNC_TRACE();
1925 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1926 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
1927 ice_reset_tx_queue(dev->data->tx_queues[i]);
1930 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1931 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
1932 ice_reset_rx_queue(dev->data->rx_queues[i]);
1937 ice_free_queues(struct rte_eth_dev *dev)
1941 PMD_INIT_FUNC_TRACE();
1943 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1944 if (!dev->data->rx_queues[i])
1946 ice_rx_queue_release(dev->data->rx_queues[i]);
1947 dev->data->rx_queues[i] = NULL;
1949 dev->data->nb_rx_queues = 0;
1951 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1952 if (!dev->data->tx_queues[i])
1954 ice_tx_queue_release(dev->data->tx_queues[i]);
1955 dev->data->tx_queues[i] = NULL;
1957 dev->data->nb_tx_queues = 0;
1960 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
1961 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
1964 ice_fdir_setup_tx_resources(struct ice_pf *pf)
1966 struct ice_tx_queue *txq;
1967 const struct rte_memzone *tz = NULL;
1969 struct rte_eth_dev *dev;
1972 PMD_DRV_LOG(ERR, "PF is not available");
1976 dev = pf->adapter->eth_dev;
1978 /* Allocate the TX queue data structure. */
1979 txq = rte_zmalloc_socket("ice fdir tx queue",
1980 sizeof(struct ice_tx_queue),
1981 RTE_CACHE_LINE_SIZE,
1984 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
1985 "tx queue structure.");
1989 /* Allocate TX hardware ring descriptors. */
1990 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
1991 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1993 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
1994 ICE_FDIR_QUEUE_ID, ring_size,
1995 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
1997 ice_tx_queue_release(txq);
1998 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2002 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2003 txq->queue_id = ICE_FDIR_QUEUE_ID;
2004 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2005 txq->vsi = pf->fdir.fdir_vsi;
2007 txq->tx_ring_dma = tz->iova;
2008 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2010 * There is no need to allocate a software ring or to reset the FDIR
2011 * program queue; just mark the queue as configured.
2016 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2022 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2024 struct ice_rx_queue *rxq;
2025 const struct rte_memzone *rz = NULL;
2027 struct rte_eth_dev *dev;
2030 PMD_DRV_LOG(ERR, "PF is not available");
2034 dev = pf->adapter->eth_dev;
2036 /* Allocate the RX queue data structure. */
2037 rxq = rte_zmalloc_socket("ice fdir rx queue",
2038 sizeof(struct ice_rx_queue),
2039 RTE_CACHE_LINE_SIZE,
2042 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2043 "rx queue structure.");
2047 /* Allocate RX hardware ring descriptors. */
2048 ring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC;
2049 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2051 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2052 ICE_FDIR_QUEUE_ID, ring_size,
2053 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2055 ice_rx_queue_release(rxq);
2056 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2060 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2061 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2062 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2063 rxq->vsi = pf->fdir.fdir_vsi;
2065 rxq->rx_ring_dma = rz->iova;
2066 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2067 sizeof(union ice_rx_flex_desc));
2068 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2071 * There is no need to allocate a software ring or to reset the FDIR
2072 * rx queue; just mark the queue as configured.
2077 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
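/* Default single-buffer receive path: swap each completed descriptor's
 * mbuf with a freshly allocated one, fill the mbuf from the descriptor
 * and update the tail register once enough descriptors have been freed.
 */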
2083 ice_recv_pkts(void *rx_queue,
2084 struct rte_mbuf **rx_pkts,
2087 struct ice_rx_queue *rxq = rx_queue;
2088 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2089 volatile union ice_rx_flex_desc *rxdp;
2090 union ice_rx_flex_desc rxd;
2091 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2092 struct ice_rx_entry *rxe;
2093 struct rte_mbuf *nmb; /* new allocated mbuf */
2094 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2095 uint16_t rx_id = rxq->rx_tail;
2097 uint16_t nb_hold = 0;
2098 uint16_t rx_packet_len;
2099 uint16_t rx_stat_err0;
2102 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2103 struct rte_eth_dev *dev;
2105 while (nb_rx < nb_pkts) {
2106 rxdp = &rx_ring[rx_id];
2107 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2109 /* Check the DD bit first */
2110 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2114 nmb = rte_mbuf_raw_alloc(rxq->mp);
2115 if (unlikely(!nmb)) {
2116 dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2117 dev->data->rx_mbuf_alloc_failed++;
2120 rxd = *rxdp; /* copy descriptor in ring to temp variable */
2123 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2125 if (unlikely(rx_id == rxq->nb_rx_desc))
2130 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2133 * fill the read format of the descriptor with the physical address
2134 * of the newly allocated mbuf: nmb
2136 rxdp->read.hdr_addr = 0;
2137 rxdp->read.pkt_addr = dma_addr;
2139 /* calculate rx_packet_len of the received pkt */
2140 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2141 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2143 /* fill old mbuf with received descriptor: rxd */
2144 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2145 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2148 rxm->pkt_len = rx_packet_len;
2149 rxm->data_len = rx_packet_len;
2150 rxm->port = rxq->port_id;
2151 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2152 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2153 ice_rxd_to_vlan_tci(rxm, &rxd);
2154 ice_rxd_to_pkt_fields(rxm, &rxd);
2155 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2156 rxm->ol_flags |= pkt_flags;
2157 /* copy old mbuf to rx_pkts */
2158 rx_pkts[nb_rx++] = rxm;
2160 rxq->rx_tail = rx_id;
2162 * If the number of free RX descriptors is greater than the RX free
2163 * threshold of the queue, advance the receive tail register of queue.
2164 * Update that register with the value of the last processed RX
2165 * descriptor minus 1.
2167 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2168 if (nb_hold > rxq->rx_free_thresh) {
2169 rx_id = (uint16_t)(rx_id == 0 ?
2170 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2171 /* write TAIL register */
2172 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
2175 rxq->nb_rx_hold = nb_hold;
2177 /* return the number of received packets in the burst */
2182 ice_parse_tunneling_params(uint64_t ol_flags,
2183 union ice_tx_offload tx_offload,
2184 uint32_t *cd_tunneling)
2186 /* EIPT: External (outer) IP header type */
2187 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2188 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2189 else if (ol_flags & PKT_TX_OUTER_IPV4)
2190 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2191 else if (ol_flags & PKT_TX_OUTER_IPV6)
2192 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2194 /* EIPLEN: External (outer) IP header length, in DWords */
2195 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2196 ICE_TXD_CTX_QW0_EIPLEN_S;
2198 /* L4TUNT: L4 Tunneling Type */
2199 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2200 case PKT_TX_TUNNEL_IPIP:
2201 /* for non UDP / GRE tunneling, set to 00b */
2203 case PKT_TX_TUNNEL_VXLAN:
2204 case PKT_TX_TUNNEL_GTP:
2205 case PKT_TX_TUNNEL_GENEVE:
2206 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2208 case PKT_TX_TUNNEL_GRE:
2209 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2212 PMD_TX_LOG(ERR, "Tunnel type not supported");
2216 /* L4TUNLEN: L4 Tunneling Length, in Words
2218 * We depend on the application to set rte_mbuf.l2_len correctly.
2219 * For IP in GRE it should be set to the length of the GRE
2221 * For MAC in GRE or MAC in UDP it should be set to the length
2222 * of the GRE or UDP headers plus the inner MAC up to including
2223 * its last Ethertype.
2224 * If MPLS labels exist, it should include them as well.
2226 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2227 ICE_TXD_CTX_QW0_NATLEN_S;
2229 if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2230 (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2231 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2232 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
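/* Fill the MACLEN/IPLEN/L4LEN fields and checksum command bits of the Tx
 * data descriptor according to the mbuf offload flags.
 */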
2236 ice_txd_enable_checksum(uint64_t ol_flags,
2238 uint32_t *td_offset,
2239 union ice_tx_offload tx_offload)
2242 if (ol_flags & PKT_TX_TUNNEL_MASK)
2243 *td_offset |= (tx_offload.outer_l2_len >> 1)
2244 << ICE_TX_DESC_LEN_MACLEN_S;
2246 *td_offset |= (tx_offload.l2_len >> 1)
2247 << ICE_TX_DESC_LEN_MACLEN_S;
2249 /* Enable L3 checksum offloads */
2250 if (ol_flags & PKT_TX_IP_CKSUM) {
2251 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2252 *td_offset |= (tx_offload.l3_len >> 2) <<
2253 ICE_TX_DESC_LEN_IPLEN_S;
2254 } else if (ol_flags & PKT_TX_IPV4) {
2255 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2256 *td_offset |= (tx_offload.l3_len >> 2) <<
2257 ICE_TX_DESC_LEN_IPLEN_S;
2258 } else if (ol_flags & PKT_TX_IPV6) {
2259 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2260 *td_offset |= (tx_offload.l3_len >> 2) <<
2261 ICE_TX_DESC_LEN_IPLEN_S;
2264 if (ol_flags & PKT_TX_TCP_SEG) {
2265 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2266 *td_offset |= (tx_offload.l4_len >> 2) <<
2267 ICE_TX_DESC_LEN_L4_LEN_S;
2271 /* Enable L4 checksum offloads */
2272 switch (ol_flags & PKT_TX_L4_MASK) {
2273 case PKT_TX_TCP_CKSUM:
2274 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2275 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2276 ICE_TX_DESC_LEN_L4_LEN_S;
2278 case PKT_TX_SCTP_CKSUM:
2279 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2280 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2281 ICE_TX_DESC_LEN_L4_LEN_S;
2283 case PKT_TX_UDP_CKSUM:
2284 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2285 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2286 ICE_TX_DESC_LEN_L4_LEN_S;
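/* Worked example (illustrative): for an untunneled TCP/IPv4 packet with
 * a 14-byte Ethernet header and a 20-byte IPv4 header, td_offset carries
 * MACLEN = 14 >> 1 = 7 words, IPLEN = 20 >> 2 = 5 DWords and
 * L4LEN = sizeof(struct rte_tcp_hdr) >> 2 = 5 DWords.
 */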
2294 ice_xmit_cleanup(struct ice_tx_queue *txq)
2296 struct ice_tx_entry *sw_ring = txq->sw_ring;
2297 volatile struct ice_tx_desc *txd = txq->tx_ring;
2298 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2299 uint16_t nb_tx_desc = txq->nb_tx_desc;
2300 uint16_t desc_to_clean_to;
2301 uint16_t nb_tx_to_clean;
2303 /* Determine the last descriptor needing to be cleaned */
2304 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2305 if (desc_to_clean_to >= nb_tx_desc)
2306 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2308 /* Check to make sure the last descriptor to clean is done */
2309 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2310 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2311 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2312 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2313 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2315 txq->port_id, txq->queue_id,
2316 txd[desc_to_clean_to].cmd_type_offset_bsz);
2317 /* Failed to clean any descriptors */
2321 /* Figure out how many descriptors will be cleaned */
2322 if (last_desc_cleaned > desc_to_clean_to)
2323 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2326 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2329 /* The last descriptor to clean is done, so that means all the
2330 * descriptors from the last descriptor that was cleaned
2331 * up to the last descriptor with the RS bit set
2332 * are done. Only reset the threshold descriptor.
2334 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2336 /* Update the txq to reflect the last descriptor that was cleaned */
2337 txq->last_desc_cleaned = desc_to_clean_to;
2338 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
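/* Worked example (illustrative): with nb_tx_desc = 512,
 * last_desc_cleaned = 500 and tx_rs_thresh = 32, desc_to_clean_to wraps
 * to 20; assuming sw_ring[20].last_id == 20 and that descriptor reports
 * DESC_DONE, nb_tx_to_clean = (512 - 500) + 20 = 32 descriptors are
 * reclaimed in one call.
 */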
2343 /* Construct the tx flags */
2344 static inline uint64_t
2345 ice_build_ctob(uint32_t td_cmd,
2350 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2351 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2352 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2353 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2354 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2357 /* Check if the context descriptor is needed for TX offloading */
2358 static inline uint16_t
2359 ice_calc_context_desc(uint64_t flags)
2361 static uint64_t mask = PKT_TX_TCP_SEG |
2363 PKT_TX_OUTER_IP_CKSUM |
2366 return (flags & mask) ? 1 : 0;
2369 /* set ice TSO context descriptor */
2370 static inline uint64_t
2371 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2373 uint64_t ctx_desc = 0;
2374 uint32_t cd_cmd, hdr_len, cd_tso_len;
2376 if (!tx_offload.l4_len) {
2377 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2381 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2382 hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2383 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2385 cd_cmd = ICE_TX_CTX_DESC_TSO;
2386 cd_tso_len = mbuf->pkt_len - hdr_len;
2387 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2388 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2389 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
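/* Worked example (illustrative): a non-tunneled packet with
 * pkt_len = 9054, l2_len = 14, l3_len = 20, l4_len = 20 and
 * tso_segsz = 1460 gives hdr_len = 54 and cd_tso_len = 9000; the
 * command, TSO length and MSS are packed into the context quad-word
 * built above.
 */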
2395 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2397 struct ice_tx_queue *txq;
2398 volatile struct ice_tx_desc *tx_ring;
2399 volatile struct ice_tx_desc *txd;
2400 struct ice_tx_entry *sw_ring;
2401 struct ice_tx_entry *txe, *txn;
2402 struct rte_mbuf *tx_pkt;
2403 struct rte_mbuf *m_seg;
2404 uint32_t cd_tunneling_params;
2409 uint32_t td_cmd = 0;
2410 uint32_t td_offset = 0;
2411 uint32_t td_tag = 0;
2413 uint64_t buf_dma_addr;
2415 union ice_tx_offload tx_offload = {0};
2418 sw_ring = txq->sw_ring;
2419 tx_ring = txq->tx_ring;
2420 tx_id = txq->tx_tail;
2421 txe = &sw_ring[tx_id];
2423 /* Check if the descriptor ring needs to be cleaned. */
2424 if (txq->nb_tx_free < txq->tx_free_thresh)
2425 ice_xmit_cleanup(txq);
2427 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2428 tx_pkt = *tx_pkts++;
2431 ol_flags = tx_pkt->ol_flags;
2432 tx_offload.l2_len = tx_pkt->l2_len;
2433 tx_offload.l3_len = tx_pkt->l3_len;
2434 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2435 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2436 tx_offload.l4_len = tx_pkt->l4_len;
2437 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2438 /* Calculate the number of context descriptors needed. */
2439 nb_ctx = ice_calc_context_desc(ol_flags);
2441 /* The number of descriptors that must be allocated for
2442 * a packet equals the number of segments of that packet
2443 * plus one context descriptor, if needed.
2445 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2446 tx_last = (uint16_t)(tx_id + nb_used - 1);
2449 if (tx_last >= txq->nb_tx_desc)
2450 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
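/* Worked example (illustrative): with nb_tx_desc = 1024, tx_id = 1020,
 * nb_segs = 4 and one context descriptor, nb_used = 5 and
 * tx_last = 1024, which wraps to 0.
 */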
2452 if (nb_used > txq->nb_tx_free) {
2453 if (ice_xmit_cleanup(txq) != 0) {
2458 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2459 while (nb_used > txq->nb_tx_free) {
2460 if (ice_xmit_cleanup(txq) != 0) {
2469 /* Descriptor based VLAN insertion */
2470 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2471 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2472 td_tag = tx_pkt->vlan_tci;
2475 /* Fill in tunneling parameters if necessary */
2476 cd_tunneling_params = 0;
2477 if (ol_flags & PKT_TX_TUNNEL_MASK)
2478 ice_parse_tunneling_params(ol_flags, tx_offload,
2479 &cd_tunneling_params);
2481 /* Enable checksum offloading */
2482 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
2483 ice_txd_enable_checksum(ol_flags, &td_cmd,
2484 &td_offset, tx_offload);
2488 /* Setup TX context descriptor if required */
2489 volatile struct ice_tx_ctx_desc *ctx_txd =
2490 (volatile struct ice_tx_ctx_desc *)
2492 uint16_t cd_l2tag2 = 0;
2493 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2495 txn = &sw_ring[txe->next_id];
2496 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2498 rte_pktmbuf_free_seg(txe->mbuf);
2502 if (ol_flags & PKT_TX_TCP_SEG)
2503 cd_type_cmd_tso_mss |=
2504 ice_set_tso_ctx(tx_pkt, tx_offload);
2506 ctx_txd->tunneling_params =
2507 rte_cpu_to_le_32(cd_tunneling_params);
2509 /* TX context descriptor based double VLAN insert */
2510 if (ol_flags & PKT_TX_QINQ) {
2511 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2512 cd_type_cmd_tso_mss |=
2513 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2514 ICE_TXD_CTX_QW1_CMD_S);
2516 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2518 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2520 txe->last_id = tx_last;
2521 tx_id = txe->next_id;
2527 txd = &tx_ring[tx_id];
2528 txn = &sw_ring[txe->next_id];
2531 rte_pktmbuf_free_seg(txe->mbuf);
2534 /* Setup TX Descriptor */
2535 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2536 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2537 txd->cmd_type_offset_bsz =
2538 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2539 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2540 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2541 ((uint64_t)m_seg->data_len <<
2542 ICE_TXD_QW1_TX_BUF_SZ_S) |
2543 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2545 txe->last_id = tx_last;
2546 tx_id = txe->next_id;
2548 m_seg = m_seg->next;
2551 /* fill the last descriptor with End of Packet (EOP) bit */
2552 td_cmd |= ICE_TX_DESC_CMD_EOP;
2553 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2554 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2556 /* set RS bit on the last descriptor of one packet */
2557 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2558 PMD_TX_FREE_LOG(DEBUG,
2559 "Setting RS bit on TXD id="
2560 "%4u (port=%d queue=%d)",
2561 tx_last, txq->port_id, txq->queue_id);
2563 td_cmd |= ICE_TX_DESC_CMD_RS;
2565 /* Update txq RS bit counters */
2566 txq->nb_tx_used = 0;
2568 txd->cmd_type_offset_bsz |=
2569 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2573 /* update Tail register */
2574 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2575 txq->tx_tail = tx_id;
2580 static inline int __attribute__((always_inline))
2581 ice_tx_free_bufs(struct ice_tx_queue *txq)
2583 struct ice_tx_entry *txep;
2586 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2587 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2588 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2591 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2593 for (i = 0; i < txq->tx_rs_thresh; i++)
2594 rte_prefetch0((txep + i)->mbuf);
2596 if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2597 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2598 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2602 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2603 rte_pktmbuf_free_seg(txep->mbuf);
2608 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2609 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2610 if (txq->tx_next_dd >= txq->nb_tx_desc)
2611 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
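/* Worked example (illustrative, assuming tx_next_dd starts at
 * tx_rs_thresh - 1): with nb_tx_desc = 512 and tx_rs_thresh = 32,
 * tx_next_dd advances 31 -> 63 -> ... -> 511 and then wraps back to 31,
 * so exactly tx_rs_thresh mbufs are freed on each successful call.
 */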
2613 return txq->tx_rs_thresh;
2616 /* Populate 4 descriptors with data from 4 mbufs */
2618 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2623 for (i = 0; i < 4; i++, txdp++, pkts++) {
2624 dma_addr = rte_mbuf_data_iova(*pkts);
2625 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2626 txdp->cmd_type_offset_bsz =
2627 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2628 (*pkts)->data_len, 0);
2632 /* Populate 1 descriptor with data from 1 mbuf */
2634 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2638 dma_addr = rte_mbuf_data_iova(*pkts);
2639 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2640 txdp->cmd_type_offset_bsz =
2641 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2642 (*pkts)->data_len, 0);
2646 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2649 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2650 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2651 const int N_PER_LOOP = 4;
2652 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2653 int mainpart, leftover;
2657 * Process most of the packets in chunks of N pkts. Any
2658 * leftover packets will get processed one at a time.
2660 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2661 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
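/* Worked example (illustrative): nb_pkts = 11 gives mainpart = 8
 * (two tx4() iterations) and leftover = 3 (three tx1() calls).
 */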
2662 for (i = 0; i < mainpart; i += N_PER_LOOP) {
2663 /* Copy N mbuf pointers to the S/W ring */
2664 for (j = 0; j < N_PER_LOOP; ++j)
2665 (txep + i + j)->mbuf = *(pkts + i + j);
2666 tx4(txdp + i, pkts + i);
2669 if (unlikely(leftover > 0)) {
2670 for (i = 0; i < leftover; ++i) {
2671 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2672 tx1(txdp + mainpart + i, pkts + mainpart + i);
2677 static inline uint16_t
2678 tx_xmit_pkts(struct ice_tx_queue *txq,
2679 struct rte_mbuf **tx_pkts,
2682 volatile struct ice_tx_desc *txr = txq->tx_ring;
2686 * Begin scanning the H/W ring for done descriptors when the number
2687 * of available descriptors drops below tx_free_thresh. For each done
2688 * descriptor, free the associated buffer.
2690 if (txq->nb_tx_free < txq->tx_free_thresh)
2691 ice_tx_free_bufs(txq);
2693 /* Use only the available descriptors */
2694 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2695 if (unlikely(!nb_pkts))
2698 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2699 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2700 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2701 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2702 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2703 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2705 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2709 /* Fill hardware descriptor ring with mbuf data */
2710 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2711 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2713 /* Determine if the RS bit needs to be set */
2714 if (txq->tx_tail > txq->tx_next_rs) {
2715 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2716 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2719 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2720 if (txq->tx_next_rs >= txq->nb_tx_desc)
2721 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
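/* Worked example (illustrative): with tx_rs_thresh = 32 the RS bit is
 * requested on descriptors 31, 63, 95, ..., so the hardware reports a
 * completion once per 32 descriptors rather than per packet.
 */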
2724 if (txq->tx_tail >= txq->nb_tx_desc)
2727 /* Update the tx tail register */
2728 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2734 ice_xmit_pkts_simple(void *tx_queue,
2735 struct rte_mbuf **tx_pkts,
2740 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2741 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2745 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2748 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2749 &tx_pkts[nb_tx], num);
2750 nb_tx = (uint16_t)(nb_tx + ret);
2751 nb_pkts = (uint16_t)(nb_pkts - ret);
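/* Illustrative note: a burst larger than ICE_TX_MAX_BURST is handed to
 * tx_xmit_pkts() in chunks of at most ICE_TX_MAX_BURST packets; the
 * loop repeats until all packets are submitted or a chunk is only
 * partially accepted.
 */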
2759 void __attribute__((cold))
2760 ice_set_rx_function(struct rte_eth_dev *dev)
2762 PMD_INIT_FUNC_TRACE();
2763 struct ice_adapter *ad =
2764 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2766 struct ice_rx_queue *rxq;
2768 bool use_avx2 = false;
2770 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2771 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
2772 ad->rx_vec_allowed = true;
2773 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2774 rxq = dev->data->rx_queues[i];
2775 if (rxq && ice_rxq_vec_setup(rxq)) {
2776 ad->rx_vec_allowed = false;
2781 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2782 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2786 ad->rx_vec_allowed = false;
2790 if (ad->rx_vec_allowed) {
2791 if (dev->data->scattered_rx) {
2793 "Using %sVector Scattered Rx (port %d).",
2794 use_avx2 ? "avx2 " : "",
2795 dev->data->port_id);
2796 dev->rx_pkt_burst = use_avx2 ?
2797 ice_recv_scattered_pkts_vec_avx2 :
2798 ice_recv_scattered_pkts_vec;
2800 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2801 use_avx2 ? "avx2 " : "",
2802 dev->data->port_id);
2803 dev->rx_pkt_burst = use_avx2 ?
2804 ice_recv_pkts_vec_avx2 :
2812 if (dev->data->scattered_rx) {
2813 /* Set the non-LRO scattered function */
2815 "Using a Scattered function on port %d.",
2816 dev->data->port_id);
2817 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2818 } else if (ad->rx_bulk_alloc_allowed) {
2820 "Rx Burst Bulk Alloc Preconditions are "
2821 "satisfied. Rx Burst Bulk Alloc function "
2822 "will be used on port %d.",
2823 dev->data->port_id);
2824 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2827 "Rx Burst Bulk Alloc Preconditions are not "
2828 "satisfied, Normal Rx will be used on port %d.",
2829 dev->data->port_id);
2830 dev->rx_pkt_burst = ice_recv_pkts;
2834 static const struct {
2835 eth_rx_burst_t pkt_burst;
2837 } ice_rx_burst_infos[] = {
2838 { ice_recv_scattered_pkts, "Scalar Scattered" },
2839 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
2840 { ice_recv_pkts, "Scalar" },
2842 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
2843 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
2844 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
2845 { ice_recv_pkts_vec, "Vector SSE" },
2850 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2851 struct rte_eth_burst_mode *mode)
2853 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2857 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
2858 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
2859 snprintf(mode->info, sizeof(mode->info), "%s",
2860 ice_rx_burst_infos[i].info);
2869 void __attribute__((cold))
2870 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
2872 struct ice_adapter *ad =
2873 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2875 /* Use a simple Tx queue if possible (only fast free is allowed) */
2876 ad->tx_simple_allowed =
2878 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
2879 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
2881 if (ad->tx_simple_allowed)
2882 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
2886 "Simple Tx can NOT be enabled on Tx queue %u.",
2890 /*********************************************************************
2894 **********************************************************************/
2895 /* TSO MSS and frame size limits */
2896 #define ICE_MIN_TSO_MSS 64
2897 #define ICE_MAX_TSO_MSS 9728
2898 #define ICE_MAX_TSO_FRAME_SIZE 262144
2900 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2907 for (i = 0; i < nb_pkts; i++) {
2909 ol_flags = m->ol_flags;
2911 if (ol_flags & PKT_TX_TCP_SEG &&
2912 (m->tso_segsz < ICE_MIN_TSO_MSS ||
2913 m->tso_segsz > ICE_MAX_TSO_MSS ||
2914 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
2916 * MSS values outside this range are considered malicious
2922 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2923 ret = rte_validate_tx_offload(m);
2929 ret = rte_net_intel_cksum_prepare(m);
2938 void __attribute__((cold))
2939 ice_set_tx_function(struct rte_eth_dev *dev)
2941 struct ice_adapter *ad =
2942 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2944 struct ice_tx_queue *txq;
2946 bool use_avx2 = false;
2948 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2949 if (!ice_tx_vec_dev_check(dev)) {
2950 ad->tx_vec_allowed = true;
2951 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2952 txq = dev->data->tx_queues[i];
2953 if (txq && ice_txq_vec_setup(txq)) {
2954 ad->tx_vec_allowed = false;
2959 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2960 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2964 ad->tx_vec_allowed = false;
2968 if (ad->tx_vec_allowed) {
2969 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2970 use_avx2 ? "avx2 " : "",
2971 dev->data->port_id);
2972 dev->tx_pkt_burst = use_avx2 ?
2973 ice_xmit_pkts_vec_avx2 :
2975 dev->tx_pkt_prepare = NULL;
2981 if (ad->tx_simple_allowed) {
2982 PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
2983 dev->tx_pkt_burst = ice_xmit_pkts_simple;
2984 dev->tx_pkt_prepare = NULL;
2986 PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
2987 dev->tx_pkt_burst = ice_xmit_pkts;
2988 dev->tx_pkt_prepare = ice_prep_pkts;
2992 static const struct {
2993 eth_tx_burst_t pkt_burst;
2995 } ice_tx_burst_infos[] = {
2996 { ice_xmit_pkts_simple, "Scalar Simple" },
2997 { ice_xmit_pkts, "Scalar" },
2999 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3000 { ice_xmit_pkts_vec, "Vector SSE" },
3005 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3006 struct rte_eth_burst_mode *mode)
3008 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3012 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3013 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3014 snprintf(mode->info, sizeof(mode->info), "%s",
3015 ice_tx_burst_infos[i].info);
3024 /* For the meaning of each value, refer to the hardware datasheet for details.
3026 * @note: fix ice_dev_supported_ptypes_get() if anything changes here.
3028 static inline uint32_t
3029 ice_get_default_pkt_type(uint16_t ptype)
3031 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3032 __rte_cache_aligned = {
3035 [1] = RTE_PTYPE_L2_ETHER,
3036 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3037 /* [3] - [5] reserved */
3038 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3039 /* [7] - [10] reserved */
3040 [11] = RTE_PTYPE_L2_ETHER_ARP,
3041 /* [12] - [21] reserved */
3043 /* Non tunneled IPv4 */
3044 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3046 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3047 RTE_PTYPE_L4_NONFRAG,
3048 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3051 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3053 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3055 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3059 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3060 RTE_PTYPE_TUNNEL_IP |
3061 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3062 RTE_PTYPE_INNER_L4_FRAG,
3063 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3064 RTE_PTYPE_TUNNEL_IP |
3065 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3066 RTE_PTYPE_INNER_L4_NONFRAG,
3067 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3068 RTE_PTYPE_TUNNEL_IP |
3069 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3070 RTE_PTYPE_INNER_L4_UDP,
3072 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3073 RTE_PTYPE_TUNNEL_IP |
3074 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3075 RTE_PTYPE_INNER_L4_TCP,
3076 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3077 RTE_PTYPE_TUNNEL_IP |
3078 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3079 RTE_PTYPE_INNER_L4_SCTP,
3080 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3081 RTE_PTYPE_TUNNEL_IP |
3082 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3083 RTE_PTYPE_INNER_L4_ICMP,
3086 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3087 RTE_PTYPE_TUNNEL_IP |
3088 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3089 RTE_PTYPE_INNER_L4_FRAG,
3090 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3091 RTE_PTYPE_TUNNEL_IP |
3092 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3093 RTE_PTYPE_INNER_L4_NONFRAG,
3094 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3095 RTE_PTYPE_TUNNEL_IP |
3096 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3097 RTE_PTYPE_INNER_L4_UDP,
3099 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3100 RTE_PTYPE_TUNNEL_IP |
3101 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3102 RTE_PTYPE_INNER_L4_TCP,
3103 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3104 RTE_PTYPE_TUNNEL_IP |
3105 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3106 RTE_PTYPE_INNER_L4_SCTP,
3107 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3108 RTE_PTYPE_TUNNEL_IP |
3109 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3110 RTE_PTYPE_INNER_L4_ICMP,
3112 /* IPv4 --> GRE/Teredo/VXLAN */
3113 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3114 RTE_PTYPE_TUNNEL_GRENAT,
3116 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3117 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3118 RTE_PTYPE_TUNNEL_GRENAT |
3119 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3120 RTE_PTYPE_INNER_L4_FRAG,
3121 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3122 RTE_PTYPE_TUNNEL_GRENAT |
3123 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3124 RTE_PTYPE_INNER_L4_NONFRAG,
3125 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3126 RTE_PTYPE_TUNNEL_GRENAT |
3127 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3128 RTE_PTYPE_INNER_L4_UDP,
3130 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3131 RTE_PTYPE_TUNNEL_GRENAT |
3132 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3133 RTE_PTYPE_INNER_L4_TCP,
3134 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3135 RTE_PTYPE_TUNNEL_GRENAT |
3136 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3137 RTE_PTYPE_INNER_L4_SCTP,
3138 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3139 RTE_PTYPE_TUNNEL_GRENAT |
3140 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3141 RTE_PTYPE_INNER_L4_ICMP,
3143 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3144 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3145 RTE_PTYPE_TUNNEL_GRENAT |
3146 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3147 RTE_PTYPE_INNER_L4_FRAG,
3148 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3149 RTE_PTYPE_TUNNEL_GRENAT |
3150 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3151 RTE_PTYPE_INNER_L4_NONFRAG,
3152 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3153 RTE_PTYPE_TUNNEL_GRENAT |
3154 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3155 RTE_PTYPE_INNER_L4_UDP,
3157 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3158 RTE_PTYPE_TUNNEL_GRENAT |
3159 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3160 RTE_PTYPE_INNER_L4_TCP,
3161 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3162 RTE_PTYPE_TUNNEL_GRENAT |
3163 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3164 RTE_PTYPE_INNER_L4_SCTP,
3165 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3166 RTE_PTYPE_TUNNEL_GRENAT |
3167 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3168 RTE_PTYPE_INNER_L4_ICMP,
3170 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3171 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3172 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3174 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3175 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3176 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3177 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3178 RTE_PTYPE_INNER_L4_FRAG,
3179 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3180 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3181 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3182 RTE_PTYPE_INNER_L4_NONFRAG,
3183 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3184 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3185 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3186 RTE_PTYPE_INNER_L4_UDP,
3188 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3189 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3190 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3191 RTE_PTYPE_INNER_L4_TCP,
3192 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3193 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3194 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3195 RTE_PTYPE_INNER_L4_SCTP,
3196 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3197 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3198 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3199 RTE_PTYPE_INNER_L4_ICMP,
3201 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3202 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3203 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3204 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3205 RTE_PTYPE_INNER_L4_FRAG,
3206 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3207 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3208 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3209 RTE_PTYPE_INNER_L4_NONFRAG,
3210 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3211 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3212 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3213 RTE_PTYPE_INNER_L4_UDP,
3215 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3216 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3217 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3218 RTE_PTYPE_INNER_L4_TCP,
3219 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3220 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3221 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3222 RTE_PTYPE_INNER_L4_SCTP,
3223 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3224 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3225 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3226 RTE_PTYPE_INNER_L4_ICMP,
3227 /* [73] - [87] reserved */
3229 /* Non tunneled IPv6 */
3230 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3232 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3233 RTE_PTYPE_L4_NONFRAG,
3234 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3237 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3239 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3241 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3245 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3246 RTE_PTYPE_TUNNEL_IP |
3247 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3248 RTE_PTYPE_INNER_L4_FRAG,
3249 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3250 RTE_PTYPE_TUNNEL_IP |
3251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3252 RTE_PTYPE_INNER_L4_NONFRAG,
3253 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3254 RTE_PTYPE_TUNNEL_IP |
3255 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3256 RTE_PTYPE_INNER_L4_UDP,
3258 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3259 RTE_PTYPE_TUNNEL_IP |
3260 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3261 RTE_PTYPE_INNER_L4_TCP,
3262 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3263 RTE_PTYPE_TUNNEL_IP |
3264 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3265 RTE_PTYPE_INNER_L4_SCTP,
3266 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3267 RTE_PTYPE_TUNNEL_IP |
3268 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3269 RTE_PTYPE_INNER_L4_ICMP,
3272 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3273 RTE_PTYPE_TUNNEL_IP |
3274 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3275 RTE_PTYPE_INNER_L4_FRAG,
3276 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3277 RTE_PTYPE_TUNNEL_IP |
3278 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3279 RTE_PTYPE_INNER_L4_NONFRAG,
3280 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3281 RTE_PTYPE_TUNNEL_IP |
3282 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3283 RTE_PTYPE_INNER_L4_UDP,
3284 /* [105] reserved */
3285 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3286 RTE_PTYPE_TUNNEL_IP |
3287 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3288 RTE_PTYPE_INNER_L4_TCP,
3289 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3290 RTE_PTYPE_TUNNEL_IP |
3291 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3292 RTE_PTYPE_INNER_L4_SCTP,
3293 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3294 RTE_PTYPE_TUNNEL_IP |
3295 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3296 RTE_PTYPE_INNER_L4_ICMP,
3298 /* IPv6 --> GRE/Teredo/VXLAN */
3299 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3300 RTE_PTYPE_TUNNEL_GRENAT,
3302 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3303 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3304 RTE_PTYPE_TUNNEL_GRENAT |
3305 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3306 RTE_PTYPE_INNER_L4_FRAG,
3307 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3308 RTE_PTYPE_TUNNEL_GRENAT |
3309 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3310 RTE_PTYPE_INNER_L4_NONFRAG,
3311 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3312 RTE_PTYPE_TUNNEL_GRENAT |
3313 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3314 RTE_PTYPE_INNER_L4_UDP,
3315 /* [113] reserved */
3316 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3317 RTE_PTYPE_TUNNEL_GRENAT |
3318 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3319 RTE_PTYPE_INNER_L4_TCP,
3320 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3321 RTE_PTYPE_TUNNEL_GRENAT |
3322 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3323 RTE_PTYPE_INNER_L4_SCTP,
3324 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3325 RTE_PTYPE_TUNNEL_GRENAT |
3326 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3327 RTE_PTYPE_INNER_L4_ICMP,
3329 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3330 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3331 RTE_PTYPE_TUNNEL_GRENAT |
3332 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3333 RTE_PTYPE_INNER_L4_FRAG,
3334 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3335 RTE_PTYPE_TUNNEL_GRENAT |
3336 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3337 RTE_PTYPE_INNER_L4_NONFRAG,
3338 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3339 RTE_PTYPE_TUNNEL_GRENAT |
3340 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3341 RTE_PTYPE_INNER_L4_UDP,
3342 /* [120] reserved */
3343 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3344 RTE_PTYPE_TUNNEL_GRENAT |
3345 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3346 RTE_PTYPE_INNER_L4_TCP,
3347 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3348 RTE_PTYPE_TUNNEL_GRENAT |
3349 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3350 RTE_PTYPE_INNER_L4_SCTP,
3351 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3352 RTE_PTYPE_TUNNEL_GRENAT |
3353 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3354 RTE_PTYPE_INNER_L4_ICMP,
3356 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3357 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3358 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3360 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3361 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3362 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3363 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3364 RTE_PTYPE_INNER_L4_FRAG,
3365 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3366 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3367 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3368 RTE_PTYPE_INNER_L4_NONFRAG,
3369 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3370 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3371 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3372 RTE_PTYPE_INNER_L4_UDP,
3373 /* [128] reserved */
3374 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3375 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3376 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3377 RTE_PTYPE_INNER_L4_TCP,
3378 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3379 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3380 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3381 RTE_PTYPE_INNER_L4_SCTP,
3382 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3383 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3384 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3385 RTE_PTYPE_INNER_L4_ICMP,
3387 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3388 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3389 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3390 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3391 RTE_PTYPE_INNER_L4_FRAG,
3392 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3393 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3394 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3395 RTE_PTYPE_INNER_L4_NONFRAG,
3396 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3397 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3398 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3399 RTE_PTYPE_INNER_L4_UDP,
3400 /* [135] reserved */
3401 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3402 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3403 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3404 RTE_PTYPE_INNER_L4_TCP,
3405 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3406 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3407 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3408 RTE_PTYPE_INNER_L4_SCTP,
3409 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3410 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3411 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3412 RTE_PTYPE_INNER_L4_ICMP,
3413 /* [139] - [299] reserved */
3416 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3417 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3419 /* PPPoE --> IPv4 */
3420 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3421 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3423 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3424 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3425 RTE_PTYPE_L4_NONFRAG,
3426 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3427 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3429 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3430 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3432 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3433 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3435 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3436 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3439 /* PPPoE --> IPv6 */
3440 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3441 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3443 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3444 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3445 RTE_PTYPE_L4_NONFRAG,
3446 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3447 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3449 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3450 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3452 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3453 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3455 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3456 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3458 /* [314] - [324] reserved */
3460 /* IPv4/IPv6 --> GTPC/GTPU */
3461 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3462 RTE_PTYPE_TUNNEL_GTPC,
3463 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3464 RTE_PTYPE_TUNNEL_GTPC,
3465 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3466 RTE_PTYPE_TUNNEL_GTPC,
3467 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3468 RTE_PTYPE_TUNNEL_GTPC,
3469 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3470 RTE_PTYPE_TUNNEL_GTPU,
3471 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3472 RTE_PTYPE_TUNNEL_GTPU,
3474 /* IPv4 --> GTPU --> IPv4 */
3475 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3476 RTE_PTYPE_TUNNEL_GTPU |
3477 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3478 RTE_PTYPE_INNER_L4_FRAG,
3479 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3480 RTE_PTYPE_TUNNEL_GTPU |
3481 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3482 RTE_PTYPE_INNER_L4_NONFRAG,
3483 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3484 RTE_PTYPE_TUNNEL_GTPU |
3485 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3486 RTE_PTYPE_INNER_L4_UDP,
3487 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3488 RTE_PTYPE_TUNNEL_GTPU |
3489 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3490 RTE_PTYPE_INNER_L4_TCP,
3491 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3492 RTE_PTYPE_TUNNEL_GTPU |
3493 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3494 RTE_PTYPE_INNER_L4_ICMP,
3496 /* IPv6 --> GTPU --> IPv4 */
3497 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3498 RTE_PTYPE_TUNNEL_GTPU |
3499 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3500 RTE_PTYPE_INNER_L4_FRAG,
3501 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3502 RTE_PTYPE_TUNNEL_GTPU |
3503 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3504 RTE_PTYPE_INNER_L4_NONFRAG,
3505 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3506 RTE_PTYPE_TUNNEL_GTPU |
3507 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3508 RTE_PTYPE_INNER_L4_UDP,
3509 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3510 RTE_PTYPE_TUNNEL_GTPU |
3511 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3512 RTE_PTYPE_INNER_L4_TCP,
3513 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3514 RTE_PTYPE_TUNNEL_GTPU |
3515 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3516 RTE_PTYPE_INNER_L4_ICMP,
3518 /* IPv4 --> GTPU --> IPv6 */
3519 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3520 RTE_PTYPE_TUNNEL_GTPU |
3521 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3522 RTE_PTYPE_INNER_L4_FRAG,
3523 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3524 RTE_PTYPE_TUNNEL_GTPU |
3525 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3526 RTE_PTYPE_INNER_L4_NONFRAG,
3527 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3528 RTE_PTYPE_TUNNEL_GTPU |
3529 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3530 RTE_PTYPE_INNER_L4_UDP,
3531 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3532 RTE_PTYPE_TUNNEL_GTPU |
3533 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3534 RTE_PTYPE_INNER_L4_TCP,
3535 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3536 RTE_PTYPE_TUNNEL_GTPU |
3537 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3538 RTE_PTYPE_INNER_L4_ICMP,
3540 /* IPv6 --> GTPU --> IPv6 */
3541 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3542 RTE_PTYPE_TUNNEL_GTPU |
3543 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3544 RTE_PTYPE_INNER_L4_FRAG,
3545 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3546 RTE_PTYPE_TUNNEL_GTPU |
3547 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3548 RTE_PTYPE_INNER_L4_NONFRAG,
3549 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3550 RTE_PTYPE_TUNNEL_GTPU |
3551 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3552 RTE_PTYPE_INNER_L4_UDP,
3553 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3554 RTE_PTYPE_TUNNEL_GTPU |
3555 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3556 RTE_PTYPE_INNER_L4_TCP,
3557 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3558 RTE_PTYPE_TUNNEL_GTPU |
3559 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3560 RTE_PTYPE_INNER_L4_ICMP,
3561 /* All others reserved */
3564 return type_table[ptype];
3567 void __attribute__((cold))
3568 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3570 struct ice_adapter *ad =
3571 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3574 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3575 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
3578 #define ICE_FDIR_MAX_WAIT_US 10000
3581 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3583 struct ice_tx_queue *txq = pf->fdir.txq;
3584 volatile struct ice_fltr_desc *fdirdp;
3585 volatile struct ice_tx_desc *txdp;
3589 fdirdp = (volatile struct ice_fltr_desc *)
3590 (&txq->tx_ring[txq->tx_tail]);
3591 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3592 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3594 txdp = &txq->tx_ring[txq->tx_tail + 1];
3595 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3596 td_cmd = ICE_TX_DESC_CMD_EOP |
3597 ICE_TX_DESC_CMD_RS |
3598 ICE_TX_DESC_CMD_DUMMY;
3600 txdp->cmd_type_offset_bsz =
3601 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
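/* Illustrative note: each programming request consumes two ring entries,
 * a filter descriptor at tx_tail followed by a DUMMY data descriptor at
 * tx_tail + 1 pointing to the pre-built FDIR packet buffer; completion
 * is then polled via the DD bit below.
 */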
3604 if (txq->tx_tail >= txq->nb_tx_desc)
3606 /* Update the tx tail register */
3607 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
3608 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3609 if ((txdp->cmd_type_offset_bsz &
3610 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3611 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3615 if (i >= ICE_FDIR_MAX_WAIT_US) {
3617 "Failed to program FDIR filter: time out to get DD on tx queue.");