1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_ethdev_driver.h>
10 #define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
12 #define ICE_TX_CKSUM_OFFLOAD_MASK ( \
16 PKT_TX_OUTER_IP_CKSUM)
18 #define ICE_RX_ERR_BITS 0x3f
20 static enum ice_status
21 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
23 struct ice_vsi *vsi = rxq->vsi;
24 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
25 struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
26 struct ice_rlan_ctx rx_ctx;
28 uint16_t buf_size, len;
29 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
33  * The kernel driver uses flex descriptors and sets the register
34  * to flex descriptor mode.
35  * DPDK uses legacy descriptors, so set the register back
36  * to the default value to select legacy descriptor mode.
38 regval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
39 QRXFLXP_CNTXT_RXDID_PRIO_M;
40 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
42 /* Set the buffer size, as header split is disabled. */
43 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
44 RTE_PKTMBUF_HEADROOM);
46 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
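/* rx_buf_len is the per-buffer length programmed into the Rx context
 * (see rx_ctx.dbuf below), rounded to the granularity implied by
 * ICE_RLAN_CTX_DBUF_S. max_pkt_len is capped both by what
 * ICE_SUPPORT_CHAIN_NUM chained buffers can hold and by the configured
 * max_rx_pkt_len.
 */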
47 len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
48 rxq->max_pkt_len = RTE_MIN(len,
49 dev->data->dev_conf.rxmode.max_rx_pkt_len);
51 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
52 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
53 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
54 PMD_DRV_LOG(ERR, "maximum packet length must "
55 "be larger than %u and smaller than %u, "
56 "as jumbo frame is enabled",
57 (uint32_t)ETHER_MAX_LEN,
58 (uint32_t)ICE_FRAME_SIZE_MAX);
62 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
63 rxq->max_pkt_len > ETHER_MAX_LEN) {
64 PMD_DRV_LOG(ERR, "maximum packet length must be "
65 "larger than %u and smaller than %u, "
66 "as jumbo frame is disabled",
67 (uint32_t)ETHER_MIN_LEN,
68 (uint32_t)ETHER_MAX_LEN);
73 memset(&rx_ctx, 0, sizeof(rx_ctx));
75 rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
76 rx_ctx.qlen = rxq->nb_rx_desc;
77 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
78 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
79 rx_ctx.dtype = 0; /* No Header Split mode */
80 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
81 rx_ctx.dsize = 1; /* 32B descriptors */
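/* When RTE_LIBRTE_ICE_16BYTE_RX_DESC is defined, dsize is left at 0,
 * which is assumed here to select 16-byte descriptors.
 */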
83 rx_ctx.rxmax = rxq->max_pkt_len;
84 /* TPH: Transaction Layer Packet (TLP) processing hints */
85 rx_ctx.tphrdesc_ena = 1;
86 rx_ctx.tphwdesc_ena = 1;
87 rx_ctx.tphdata_ena = 1;
88 rx_ctx.tphhead_ena = 1;
89 /* Low Receive Queue Threshold, defined in units of 64 descriptors.
90 * When the number of free descriptors goes below the lrxqthresh,
91 * an immediate interrupt is triggered.
93 rx_ctx.lrxqthresh = 2;
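/* A value of 2 means an interrupt fires once fewer than 2 * 64 = 128
 * free descriptors remain (threshold is in units of 64, see above).
 */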
94 /* Default: use 32-byte descriptors, extract VLAN tag to L2TAG2 (1st) */
98 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
100 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
104 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
106 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
111 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
112 RTE_PKTMBUF_HEADROOM);
114 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
116 /* Init the Rx tail register */
117 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
122 /* Allocate mbufs for all descriptors in rx queue */
124 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
126 struct ice_rx_entry *rxe = rxq->sw_ring;
130 for (i = 0; i < rxq->nb_rx_desc; i++) {
131 volatile union ice_rx_desc *rxd;
132 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
134 if (unlikely(!mbuf)) {
135 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
139 rte_mbuf_refcnt_set(mbuf, 1);
141 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
143 mbuf->port = rxq->port_id;
146 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
148 rxd = &rxq->rx_ring[i];
149 rxd->read.pkt_addr = dma_addr;
150 rxd->read.hdr_addr = 0;
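/* hdr_addr stays 0 because header split is disabled (dtype = 0 in the
 * Rx queue context).
 */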
151 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
161 /* Free all mbufs for descriptors in rx queue */
163 ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
167 if (!rxq || !rxq->sw_ring) {
168 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
172 for (i = 0; i < rxq->nb_rx_desc; i++) {
173 if (rxq->sw_ring[i].mbuf) {
174 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
175 rxq->sw_ring[i].mbuf = NULL;
178 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
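/* Also free any mbufs still held in the bulk-alloc staging array. */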
179 if (rxq->rx_nb_avail == 0)
181 for (i = 0; i < rxq->rx_nb_avail; i++) {
182 struct rte_mbuf *mbuf;
184 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
185 rte_pktmbuf_free_seg(mbuf);
187 rxq->rx_nb_avail = 0;
188 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
191 /* Turn an Rx queue on or off
192  * @q_idx: queue index in PF scope
193  * @on: whether to turn the queue on or off
196 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
201 /* QRX_CTRL = QRX_ENA */
202 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
205 if (reg & QRX_CTRL_QENA_STAT_M)
206 return 0; /* Already on, skip */
207 reg |= QRX_CTRL_QENA_REQ_M;
209 if (!(reg & QRX_CTRL_QENA_STAT_M))
210 return 0; /* Already off, skip */
211 reg &= ~QRX_CTRL_QENA_REQ_M;
214 /* Write the register */
215 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
216 /* Check the result. QENA_STAT is expected to follow
217  * QENA_REQ within no more than 10 us.
218  * TODO: need to change the wait counter later
220 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
221 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
222 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
224 if ((reg & QRX_CTRL_QENA_REQ_M) &&
225 (reg & QRX_CTRL_QENA_STAT_M))
228 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
229 !(reg & QRX_CTRL_QENA_STAT_M))
234 /* Check whether the wait timed out */
235 if (j >= ICE_CHK_Q_ENA_COUNT) {
236 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
237 (on ? "enable" : "disable"), q_idx);
245 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
246 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
248 ice_check_rx_burst_bulk_alloc_preconditions
249 (__rte_unused struct ice_rx_queue *rxq)
254 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
255 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
256 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
257 "rxq->rx_free_thresh=%d, "
258 "ICE_RX_MAX_BURST=%d",
259 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
261 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
262 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
263 "rxq->rx_free_thresh=%d, "
264 "rxq->nb_rx_desc=%d",
265 rxq->rx_free_thresh, rxq->nb_rx_desc);
267 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
268 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
269 "rxq->nb_rx_desc=%d, "
270 "rxq->rx_free_thresh=%d",
271 rxq->nb_rx_desc, rxq->rx_free_thresh);
281 /* reset fields in ice_rx_queue back to default */
283 ice_reset_rx_queue(struct ice_rx_queue *rxq)
289 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
293 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
294 if (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
295 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
297 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
298 len = rxq->nb_rx_desc;
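/* Clear the ring byte-by-byte through a volatile pointer so the stores
 * to the DMA-visible descriptors are not elided or reordered by the
 * compiler.
 */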
300 for (i = 0; i < len * sizeof(union ice_rx_desc); i++)
301 ((volatile char *)rxq->rx_ring)[i] = 0;
303 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
304 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
305 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
306 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
308 rxq->rx_nb_avail = 0;
309 rxq->rx_next_avail = 0;
310 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
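/* rx_free_trigger marks the last index of the first rx_free_thresh-sized
 * block; the bulk-alloc RX path (not shown here) refills descriptors once
 * receive processing passes this index.
 */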
311 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
315 rxq->pkt_first_seg = NULL;
316 rxq->pkt_last_seg = NULL;
320 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
322 struct ice_rx_queue *rxq;
324 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
326 PMD_INIT_FUNC_TRACE();
328 if (rx_queue_id >= dev->data->nb_rx_queues) {
329 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
330 rx_queue_id, dev->data->nb_rx_queues);
334 rxq = dev->data->rx_queues[rx_queue_id];
335 if (!rxq || !rxq->q_set) {
336 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
341 err = ice_program_hw_rx_queue(rxq);
343 PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
348 err = ice_alloc_rx_queue_mbufs(rxq);
350 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
356 /* Init the RX tail register. */
357 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
359 err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
361 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
364 ice_rx_queue_release_mbufs(rxq);
365 ice_reset_rx_queue(rxq);
369 dev->data->rx_queue_state[rx_queue_id] =
370 RTE_ETH_QUEUE_STATE_STARTED;
376 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
378 struct ice_rx_queue *rxq;
380 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
382 if (rx_queue_id < dev->data->nb_rx_queues) {
383 rxq = dev->data->rx_queues[rx_queue_id];
385 err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
387 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
391 ice_rx_queue_release_mbufs(rxq);
392 ice_reset_rx_queue(rxq);
393 dev->data->rx_queue_state[rx_queue_id] =
394 RTE_ETH_QUEUE_STATE_STOPPED;
401 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
403 struct ice_tx_queue *txq;
407 struct ice_aqc_add_tx_qgrp txq_elem;
408 struct ice_tlan_ctx tx_ctx;
410 PMD_INIT_FUNC_TRACE();
412 if (tx_queue_id >= dev->data->nb_tx_queues) {
413 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
414 tx_queue_id, dev->data->nb_tx_queues);
418 txq = dev->data->tx_queues[tx_queue_id];
419 if (!txq || !txq->q_set) {
420 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
426 hw = ICE_VSI_TO_HW(vsi);
428 memset(&txq_elem, 0, sizeof(txq_elem));
429 memset(&tx_ctx, 0, sizeof(tx_ctx));
430 txq_elem.num_txqs = 1;
431 txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
433 tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
434 tx_ctx.qlen = txq->nb_tx_desc;
435 tx_ctx.pf_num = hw->pf_id;
436 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
437 tx_ctx.src_vsi = vsi->vsi_id;
438 tx_ctx.port_num = hw->port_info->lport;
439 tx_ctx.tso_ena = 1; /* tso enable */
440 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
441 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
443 ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
446 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
448 /* Init the Tx tail register */
449 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
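/* Tail starts at 0: no Tx descriptors have been handed to hardware yet. */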
451 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
452 sizeof(txq_elem), NULL);
454 PMD_DRV_LOG(ERR, "Failed to add lan txq");
457 /* store the schedule node id */
458 txq->q_teid = txq_elem.txqs[0].q_teid;
460 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
464 /* Free all mbufs for descriptors in tx queue */
466 ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
470 if (!txq || !txq->sw_ring) {
471 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
475 for (i = 0; i < txq->nb_tx_desc; i++) {
476 if (txq->sw_ring[i].mbuf) {
477 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
478 txq->sw_ring[i].mbuf = NULL;
484 ice_reset_tx_queue(struct ice_tx_queue *txq)
486 struct ice_tx_entry *txe;
487 uint16_t i, prev, size;
490 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
495 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
496 for (i = 0; i < size; i++)
497 ((volatile char *)txq->tx_ring)[i] = 0;
499 prev = (uint16_t)(txq->nb_tx_desc - 1);
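/* Mark every descriptor as DONE and link the SW ring entries into a
 * circle: each entry's next_id points to the entry after it, and the
 * last entry (prev starts at nb_tx_desc - 1) points back to entry 0.
 */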
500 for (i = 0; i < txq->nb_tx_desc; i++) {
501 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
503 txd->cmd_type_offset_bsz =
504 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
507 txe[prev].next_id = i;
511 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
512 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
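/* tx_next_dd is the next descriptor polled for the DD (done) bit and
 * tx_next_rs the next one that will carry the RS bit; both start at
 * index tx_rs_thresh - 1.
 */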
517 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
518 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
522 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
524 struct ice_tx_queue *txq;
525 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
526 enum ice_status status;
530 if (tx_queue_id >= dev->data->nb_tx_queues) {
531 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
532 tx_queue_id, dev->data->nb_tx_queues);
536 txq = dev->data->tx_queues[tx_queue_id];
538 PMD_DRV_LOG(ERR, "TX queue %u is not available",
543 q_ids[0] = txq->reg_idx;
544 q_teids[0] = txq->q_teid;
546 status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
547 ICE_NO_RESET, 0, NULL);
548 if (status != ICE_SUCCESS) {
549 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
553 ice_tx_queue_release_mbufs(txq);
554 ice_reset_tx_queue(txq);
555 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
561 ice_rx_queue_setup(struct rte_eth_dev *dev,
564 unsigned int socket_id,
565 const struct rte_eth_rxconf *rx_conf,
566 struct rte_mempool *mp)
568 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
569 struct ice_adapter *ad =
570 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
571 struct ice_vsi *vsi = pf->main_vsi;
572 struct ice_rx_queue *rxq;
573 const struct rte_memzone *rz;
576 int use_def_burst_func = 1;
578 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
579 nb_desc > ICE_MAX_RING_DESC ||
580 nb_desc < ICE_MIN_RING_DESC) {
581 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
586 /* Free memory if needed */
587 if (dev->data->rx_queues[queue_idx]) {
588 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
589 dev->data->rx_queues[queue_idx] = NULL;
592 /* Allocate the rx queue data structure */
593 rxq = rte_zmalloc_socket(NULL,
594 sizeof(struct ice_rx_queue),
598 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
599 "rx queue data structure");
603 rxq->nb_rx_desc = nb_desc;
604 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
605 rxq->queue_id = queue_idx;
607 rxq->reg_idx = vsi->base_queue + queue_idx;
608 rxq->port_id = dev->data->port_id;
609 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
610 rxq->crc_len = ETHER_CRC_LEN;
614 rxq->drop_en = rx_conf->rx_drop_en;
616 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
618 /* Allocate the maximum number of RX ring hardware descriptors. */
619 len = ICE_MAX_RING_DESC;
621 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
623 * Allocating a little more memory because vectorized/bulk_alloc Rx
624  * functions don't check boundaries each time.
626 len += ICE_RX_MAX_BURST;
629 /* Size the RX ring for the maximum number of descriptors and reserve DMA memory. */
630 ring_size = sizeof(union ice_rx_desc) * len;
631 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
632 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
633 ring_size, ICE_RING_BASE_ALIGN,
636 ice_rx_queue_release(rxq);
637 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
641 /* Zero all the descriptors in the ring. */
642 memset(rz->addr, 0, ring_size);
644 rxq->rx_ring_phys_addr = rz->phys_addr;
645 rxq->rx_ring = (union ice_rx_desc *)rz->addr;
647 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
648 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
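/* The ICE_RX_MAX_BURST extra entries are pointed at fake_mbuf by
 * ice_reset_rx_queue() so the bulk-alloc RX path can read past the
 * real ring end without dereferencing invalid pointers.
 */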
653 /* Allocate the software ring. */
654 rxq->sw_ring = rte_zmalloc_socket(NULL,
655 sizeof(struct ice_rx_entry) * len,
659 ice_rx_queue_release(rxq);
660 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
664 ice_reset_rx_queue(rxq);
666 dev->data->rx_queues[queue_idx] = rxq;
668 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
670 if (!use_def_burst_func) {
671 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
672 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
673 "satisfied. Rx Burst Bulk Alloc function will be "
674 "used on port=%d, queue=%d.",
675 rxq->port_id, rxq->queue_id);
676 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
678 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
679 "not satisfied, Scattered Rx is requested, "
680 "or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is "
681 "not enabled on port=%d, queue=%d.",
682 rxq->port_id, rxq->queue_id);
683 ad->rx_bulk_alloc_allowed = false;
690 ice_rx_queue_release(void *rxq)
692 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
695 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
699 ice_rx_queue_release_mbufs(q);
700 rte_free(q->sw_ring);
705 ice_tx_queue_setup(struct rte_eth_dev *dev,
708 unsigned int socket_id,
709 const struct rte_eth_txconf *tx_conf)
711 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
712 struct ice_vsi *vsi = pf->main_vsi;
713 struct ice_tx_queue *txq;
714 const struct rte_memzone *tz;
716 uint16_t tx_rs_thresh, tx_free_thresh;
719 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
721 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
722 nb_desc > ICE_MAX_RING_DESC ||
723 nb_desc < ICE_MIN_RING_DESC) {
724 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
730 * The following two parameters control the setting of the RS bit on
731 * transmit descriptors. TX descriptors will have their RS bit set
732 * after txq->tx_rs_thresh descriptors have been used. The TX
733 * descriptor ring will be cleaned after txq->tx_free_thresh
734 * descriptors are used or if the number of descriptors required to
735 * transmit a packet is greater than the number of free TX descriptors.
737 * The following constraints must be satisfied:
738 * - tx_rs_thresh must be greater than 0.
739 * - tx_rs_thresh must be less than the size of the ring minus 2.
740 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
741 * - tx_rs_thresh must be a divisor of the ring size.
742 * - tx_free_thresh must be greater than 0.
743 * - tx_free_thresh must be less than the size of the ring minus 3.
745 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
746 * race condition, hence the maximum threshold constraints. When set
747 * to zero use default values.
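 *
 * Example (assuming the defaults used below are 32 for both thresholds):
 * with a 1024-descriptor ring, the RS bit is set on every 32nd
 * descriptor and a cleanup pass runs once 32 descriptors are in use;
 * 1024 % 32 == 0, so the divisor constraint is met.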
749 tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
750 tx_conf->tx_rs_thresh :
751 ICE_DEFAULT_TX_RSBIT_THRESH);
752 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
753 tx_conf->tx_free_thresh :
754 ICE_DEFAULT_TX_FREE_THRESH);
755 if (tx_rs_thresh >= (nb_desc - 2)) {
756 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
757 "number of TX descriptors minus 2. "
758 "(tx_rs_thresh=%u port=%d queue=%d)",
759 (unsigned int)tx_rs_thresh,
760 (int)dev->data->port_id,
764 if (tx_free_thresh >= (nb_desc - 3)) {
765 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
766 "number of TX descriptors minus 3. "
768 "(tx_free_thresh=%u port=%d queue=%d)",
769 (unsigned int)tx_free_thresh,
770 (int)dev->data->port_id,
774 if (tx_rs_thresh > tx_free_thresh) {
775 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
776 "equal to tx_free_thresh. (tx_free_thresh=%u"
777 " tx_rs_thresh=%u port=%d queue=%d)",
778 (unsigned int)tx_free_thresh,
779 (unsigned int)tx_rs_thresh,
780 (int)dev->data->port_id,
784 if ((nb_desc % tx_rs_thresh) != 0) {
785 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
786 "number of TX descriptors. (tx_rs_thresh=%u"
787 " port=%d queue=%d)",
788 (unsigned int)tx_rs_thresh,
789 (int)dev->data->port_id,
793 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
794 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
795 "tx_rs_thresh is greater than 1. "
796 "(tx_rs_thresh=%u port=%d queue=%d)",
797 (unsigned int)tx_rs_thresh,
798 (int)dev->data->port_id,
803 /* Free memory if needed. */
804 if (dev->data->tx_queues[queue_idx]) {
805 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
806 dev->data->tx_queues[queue_idx] = NULL;
809 /* Allocate the TX queue data structure. */
810 txq = rte_zmalloc_socket(NULL,
811 sizeof(struct ice_tx_queue),
815 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
816 "tx queue structure");
820 /* Allocate TX hardware ring descriptors. */
821 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
822 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
823 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
824 ring_size, ICE_RING_BASE_ALIGN,
827 ice_tx_queue_release(txq);
828 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
832 txq->nb_tx_desc = nb_desc;
833 txq->tx_rs_thresh = tx_rs_thresh;
834 txq->tx_free_thresh = tx_free_thresh;
835 txq->pthresh = tx_conf->tx_thresh.pthresh;
836 txq->hthresh = tx_conf->tx_thresh.hthresh;
837 txq->wthresh = tx_conf->tx_thresh.wthresh;
838 txq->queue_id = queue_idx;
840 txq->reg_idx = vsi->base_queue + queue_idx;
841 txq->port_id = dev->data->port_id;
842 txq->offloads = offloads;
844 txq->tx_deferred_start = tx_conf->tx_deferred_start;
846 txq->tx_ring_phys_addr = tz->phys_addr;
847 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
849 /* Allocate software ring */
851 rte_zmalloc_socket(NULL,
852 sizeof(struct ice_tx_entry) * nb_desc,
856 ice_tx_queue_release(txq);
857 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
861 ice_reset_tx_queue(txq);
863 dev->data->tx_queues[queue_idx] = txq;
869 ice_tx_queue_release(void *txq)
871 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
874 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
878 ice_tx_queue_release_mbufs(q);
879 rte_free(q->sw_ring);
884 ice_clear_queues(struct rte_eth_dev *dev)
888 PMD_INIT_FUNC_TRACE();
890 for (i = 0; i < dev->data->nb_tx_queues; i++) {
891 ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
892 ice_reset_tx_queue(dev->data->tx_queues[i]);
895 for (i = 0; i < dev->data->nb_rx_queues; i++) {
896 ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
897 ice_reset_rx_queue(dev->data->rx_queues[i]);
902 ice_free_queues(struct rte_eth_dev *dev)
906 PMD_INIT_FUNC_TRACE();
908 for (i = 0; i < dev->data->nb_rx_queues; i++) {
909 if (!dev->data->rx_queues[i])
911 ice_rx_queue_release(dev->data->rx_queues[i]);
912 dev->data->rx_queues[i] = NULL;
914 dev->data->nb_rx_queues = 0;
916 for (i = 0; i < dev->data->nb_tx_queues; i++) {
917 if (!dev->data->tx_queues[i])
919 ice_tx_queue_release(dev->data->tx_queues[i]);
920 dev->data->tx_queues[i] = NULL;
922 dev->data->nb_tx_queues = 0;