1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
8 #include <rte_malloc.h>
11 #include "bnxt_ring.h"
/*
 * Detach the hardware stats context from the Tx queue's completion ring.
 * Only the pointer is cleared here; the stats memory itself is not freed
 * in this function — presumably it lives in a memzone owned elsewhere
 * (NOTE(review): confirm against the allocation site).
 */
19 void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
21 if (txq && txq->cp_ring && txq->cp_ring->hw_stats)
22 txq->cp_ring->hw_stats = NULL;
/*
 * Free every mbuf still referenced by the Tx queue's software ring.
 * Walks the full ring (tx_ring_struct->ring_size entries), frees each
 * attached segment via rte_pktmbuf_free_seg(), and NULLs the slot so a
 * later release cannot double-free. Safe to call with a NULL queue or a
 * queue whose tx_ring was never allocated (early-out guard below).
 */
25 static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
27 struct bnxt_sw_tx_bd *sw_ring;
30 if (!txq || !txq->tx_ring)
33 sw_ring = txq->tx_ring->tx_buf_ring;
35 for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
36 if (sw_ring[i].mbuf) {
/* Frees one segment only; chained segments are presumably covered
 * by their own ring slots — TODO(review): confirm Tx fill logic. */
37 rte_pktmbuf_free_seg(sw_ring[i].mbuf);
38 sw_ring[i].mbuf = NULL;
/*
 * Free the queued mbufs of every Tx queue on the device.
 * Iterates bp->tx_queues[0 .. tx_nr_rings) and delegates per-queue work
 * to bnxt_tx_queue_release_mbufs(), which tolerates NULL entries, so no
 * per-slot NULL check is needed here.
 */
44 void bnxt_free_tx_mbufs(struct bnxt *bp)
46 struct bnxt_tx_queue *txq;
49 for (i = 0; i < (int)bp->tx_nr_rings; i++) {
50 txq = bp->tx_queues[i];
51 bnxt_tx_queue_release_mbufs(txq);
/*
 * ethdev .tx_queue_release callback: full teardown of one Tx queue.
 *
 * Release order matters and must stay as written:
 *   1. free all queued mbufs (software ring contents),
 *   2. free the Tx descriptor ring (HW descriptors + ring struct),
 *   3. free the completion ring the Tx ring posts to,
 *   4. detach the stats context, then release the backing memzone.
 *
 * If the device is in an error state (is_bnxt_in_error), teardown is
 * presumably aborted early — the surrounding control flow for that
 * check is not visible here (TODO(review): confirm the early-return).
 */
55 void bnxt_tx_queue_release_op(void *tx_queue)
57 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
60 if (is_bnxt_in_error(txq->bp))
63 /* Free TX ring hardware descriptors */
64 bnxt_tx_queue_release_mbufs(txq);
66 bnxt_free_ring(txq->tx_ring->tx_ring_struct);
67 rte_free(txq->tx_ring->tx_ring_struct);
68 rte_free(txq->tx_ring);
71 /* Free TX completion ring hardware descriptors */
73 bnxt_free_ring(txq->cp_ring->cp_ring_struct);
74 rte_free(txq->cp_ring->cp_ring_struct);
75 rte_free(txq->cp_ring);
/* Stats pointer must be cleared before the memzone that (presumably)
 * backs it is returned — keep these two last. */
78 bnxt_free_txq_stats(txq);
79 rte_memzone_free(txq->mz);
87 int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
90 unsigned int socket_id,
91 const struct rte_eth_txconf *tx_conf)
93 struct bnxt *bp = eth_dev->data->dev_private;
94 struct bnxt_tx_queue *txq;
97 rc = is_bnxt_in_error(bp);
101 if (queue_idx >= bnxt_max_rings(bp)) {
103 "Cannot create Tx ring %d. Only %d rings available\n",
104 queue_idx, bp->max_tx_rings);
108 if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_TX_DESC_CNT) {
109 PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
113 if (eth_dev->data->tx_queues) {
114 txq = eth_dev->data->tx_queues[queue_idx];
116 bnxt_tx_queue_release_op(txq);
120 txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
121 RTE_CACHE_LINE_SIZE, socket_id);
123 PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
127 txq->free = rte_zmalloc_socket(NULL,
128 sizeof(struct rte_mbuf *) * nb_desc,
129 RTE_CACHE_LINE_SIZE, socket_id);
131 PMD_DRV_LOG(ERR, "allocation of tx mbuf free array failed!");
136 txq->nb_tx_desc = nb_desc;
137 txq->tx_free_thresh =
138 RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);
139 txq->offloads = eth_dev->data->dev_conf.txmode.offloads |
142 txq->tx_deferred_start = tx_conf->tx_deferred_start;
144 rc = bnxt_init_tx_ring_struct(txq, socket_id);
148 txq->queue_id = queue_idx;
149 txq->port_id = eth_dev->data->port_id;
151 /* Allocate TX ring hardware descriptors */
152 if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL,
154 PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
159 if (bnxt_init_one_tx_ring(txq)) {
160 PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
165 eth_dev->data->tx_queues[queue_idx] = txq;
167 if (txq->tx_deferred_start)
168 txq->tx_started = false;
170 txq->tx_started = true;
174 bnxt_tx_queue_release_op(txq);