/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>

static void axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	struct rte_mbuf **sw_ring;
	uint16_t i;

	if (!rx_queue)
		return;
	sw_ring = rx_queue->sw_ring;
	if (sw_ring) {
		for (i = 0; i < rx_queue->nb_desc; i++)
			rte_pktmbuf_free(sw_ring[i]);
		rte_free(sw_ring);
	}
	rte_free(rx_queue);
}

void axgbe_dev_rx_queue_release(void *rxq)
{
	axgbe_rx_queue_release(rxq);
}
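
/*
 * Rx queue setup: validate the requested ring size, allocate the queue
 * structure, the hardware descriptor ring and the software mbuf ring,
 * then register the queue with the device.
 */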
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * Validate the Rx descriptor count: it must be a power of 2 and
	 * no larger than the hardware-supported maximum.
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;

	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
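	/*
	 * Per-queue DMA channel register block; the tail pointer register is
	 * what the PMD writes to hand new Rx descriptors to the hardware.
	 */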
	rxq->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
		(DMA_CH_INC * rxq->queue_id);
	rxq->dma_tail_reg = (volatile uint32_t *)(rxq->dma_regs +
						  DMA_CH_RDTR_LO);
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);

	/* CRC stripping in AXGBE is supported per port, not per queue */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
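	/*
	 * Rx free threshold: use the application's value when given,
	 * otherwise the driver default; if it exceeds the ring size,
	 * fall back to one eighth of the ring.
	 */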
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	/* Allocate RX ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);

	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}

static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	struct rte_mbuf **sw_ring;
	uint16_t i;

	if (!tx_queue)
		return;
	sw_ring = tx_queue->sw_ring;
	if (sw_ring) {
		for (i = 0; i < tx_queue->nb_desc; i++)
			rte_pktmbuf_free(sw_ring[i]);
		rte_free(sw_ring);
	}
	rte_free(tx_queue);
}

void axgbe_dev_tx_queue_release(void *txq)
{
	axgbe_tx_queue_release(txq);
}
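
/*
 * Tx queue setup: validate the ring size, allocate the queue structure,
 * the hardware descriptor ring and the software mbuf ring, then register
 * the queue with the device.
 */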
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc = nb_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;

	pdata = (struct axgbe_port *)dev->data->dev_private;

	/*
	 * Validate the Tx descriptor count: it must be a power of 2 and
	 * within the hardware-supported range.
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the Tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;

	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;
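
	/*
	 * The vector Tx path handles only the no-offload case; keep the
	 * scalar path whenever the application did not request
	 * ETH_TXQ_FLAGS_NOOFFLOADS.
	 */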
	if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
	    ETH_TXQ_FLAGS_NOOFFLOADS) {
		txq->vector_disable = 1;
	}

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->phys_addr;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
		(DMA_CH_INC * txq->queue_id);
	txq->dma_tail_reg = (volatile uint32_t *)(txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->nb_desc_free = txq->nb_desc;

	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}

	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

	return 0;
}
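
/*
 * Release every configured Rx and Tx queue and clear the references held
 * in dev->data.
 */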
void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}
}