net/axgbe: add Rx/Tx setup
drivers/net/axgbe/axgbe_rxtx.c
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

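/*
 * Free any mbufs still held in the Rx software ring, then release the
 * software ring and the queue structure itself.
 */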
static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
        uint16_t i;
        struct rte_mbuf **sw_ring;

        if (rx_queue) {
                sw_ring = rx_queue->sw_ring;
                if (sw_ring) {
                        for (i = 0; i < rx_queue->nb_desc; i++) {
                                if (sw_ring[i])
                                        rte_pktmbuf_free(sw_ring[i]);
                        }
                        rte_free(sw_ring);
                }
                rte_free(rx_queue);
        }
}

void axgbe_dev_rx_queue_release(void *rxq)
{
        axgbe_rx_queue_release(rxq);
}

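/*
 * rx_queue_setup device op: validate the descriptor count, allocate the
 * queue structure, the hardware descriptor ring and the software mbuf
 * ring, and record the queue in the ethdev data.
 */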
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                             uint16_t nb_desc, unsigned int socket_id,
                             const struct rte_eth_rxconf *rx_conf,
                             struct rte_mempool *mp)
{
        PMD_INIT_FUNC_TRACE();
        uint32_t size;
        const struct rte_memzone *dma;
        struct axgbe_rx_queue *rxq;
        uint32_t rx_desc = nb_desc;
        struct axgbe_port *pdata = dev->data->dev_private;

        /*
         * Validate the Rx descriptor count: it must be a power of 2 and
         * must not exceed the maximum the hardware supports.
         */
        if ((!rte_is_power_of_2(rx_desc)) ||
            rx_desc > pdata->rx_desc_count)
                return -EINVAL;
        /* First allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("ethdev RX queue",
                                 sizeof(struct axgbe_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
                return -ENOMEM;
        }

        rxq->cur = 0;
        rxq->dirty = 0;
        rxq->pdata = pdata;
        rxq->mb_pool = mp;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
        rxq->nb_desc = rx_desc;
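        /*
         * Each queue maps to one DMA channel: the register blocks sit at
         * fixed strides from DMA_CH_BASE, and the Rx descriptor tail
         * pointer register acts as the doorbell for this channel.
         */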
        rxq->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
                (DMA_CH_INC * rxq->queue_id);
        rxq->dma_tail_reg = (volatile uint32_t *)(rxq->dma_regs +
                                                  DMA_CH_RDTR_LO);
        rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
                        DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);

        /* CRC stripping in AXGBE is a per-port setting, not per-queue */
        pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
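        /*
         * Use the application-supplied free threshold, falling back to
         * the driver default; if it exceeds the ring size, clamp it to
         * an eighth of the ring.
         */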
        rxq->free_thresh = rx_conf->rx_free_thresh ?
                rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
        if (rxq->free_thresh > rxq->nb_desc)
                rxq->free_thresh = rxq->nb_desc >> 3;

        /* Allocate RX ring hardware descriptors */
        size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
        dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
                                       socket_id);
        if (!dma) {
                PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
                axgbe_rx_queue_release(rxq);
                return -ENOMEM;
        }
        rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
        rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
        memset((void *)rxq->desc, 0, size);
        /* Allocate software ring */
        size = rxq->nb_desc * sizeof(struct rte_mbuf *);
        rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
                                          RTE_CACHE_LINE_SIZE,
                                          socket_id);
        if (!rxq->sw_ring) {
                PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
                axgbe_rx_queue_release(rxq);
                return -ENOMEM;
        }
        dev->data->rx_queues[queue_idx] = rxq;
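        /*
         * Cache the ethdev Rx queue array in the port private data on
         * first setup so the rest of the driver can reach the queues
         * through pdata.
         */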
        if (!pdata->rx_queues)
                pdata->rx_queues = dev->data->rx_queues;

        return 0;
}

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
        uint16_t i;
        struct rte_mbuf **sw_ring;

        if (tx_queue) {
                sw_ring = tx_queue->sw_ring;
                if (sw_ring) {
                        for (i = 0; i < tx_queue->nb_desc; i++) {
                                if (sw_ring[i])
                                        rte_pktmbuf_free(sw_ring[i]);
                        }
                        rte_free(sw_ring);
                }
                rte_free(tx_queue);
        }
}

void axgbe_dev_tx_queue_release(void *txq)
{
        axgbe_tx_queue_release(txq);
}

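/*
 * tx_queue_setup device op: mirrors the Rx path above, with an extra
 * lower bound on the ring size and a check of the queue flags to decide
 * whether the vector Tx path can be used.
 */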
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                             uint16_t nb_desc, unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf)
{
        PMD_INIT_FUNC_TRACE();
        uint32_t tx_desc;
        struct axgbe_port *pdata;
        struct axgbe_tx_queue *txq;
        unsigned int tsize;
        const struct rte_memzone *tz;

        tx_desc = nb_desc;
        pdata = (struct axgbe_port *)dev->data->dev_private;

        /*
         * Validate the Tx descriptor count: it must be a power of 2, at
         * least AXGBE_MIN_RING_DESC, and must not exceed the maximum the
         * hardware supports.
         */
        if ((!rte_is_power_of_2(tx_desc)) ||
            tx_desc > pdata->tx_desc_count ||
            tx_desc < AXGBE_MIN_RING_DESC)
                return -EINVAL;

        /* First allocate the tx queue data structure */
        txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (!txq)
                return -ENOMEM;
        txq->pdata = pdata;

        txq->nb_desc = tx_desc;
        txq->free_thresh = tx_conf->tx_free_thresh ?
                tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
        if (txq->free_thresh > txq->nb_desc)
                txq->free_thresh = (txq->nb_desc >> 1);
        txq->free_batch_cnt = txq->free_thresh;

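        /*
         * The vector Tx path handles no offloads, so fall back to the
         * scalar path unless the application asked for none.
         */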
        if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
            ETH_TXQ_FLAGS_NOOFFLOADS) {
                txq->vector_disable = 1;
        }

        /* Allocate TX ring hardware descriptors */
        tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                                      tsize, AXGBE_DESC_ALIGN, socket_id);
        if (!tz) {
                axgbe_tx_queue_release(txq);
                return -ENOMEM;
        }
        memset(tz->addr, 0, tsize);
        txq->ring_phys_addr = (uint64_t)tz->phys_addr;
        txq->desc = tz->addr;
        txq->queue_id = queue_idx;
        txq->port_id = dev->data->port_id;
        txq->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
                (DMA_CH_INC * txq->queue_id);
        txq->dma_tail_reg = (volatile uint32_t *)(txq->dma_regs +
                                                  DMA_CH_TDTR_LO);
        txq->cur = 0;
        txq->dirty = 0;
        txq->nb_desc_free = txq->nb_desc;
        /* Allocate software ring */
        tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
        txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
                                   RTE_CACHE_LINE_SIZE);
        if (!txq->sw_ring) {
                axgbe_tx_queue_release(txq);
                return -ENOMEM;
        }
        dev->data->tx_queues[queue_idx] = txq;
        if (!pdata->tx_queues)
                pdata->tx_queues = dev->data->tx_queues;

        return 0;
}

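/*
 * Release every configured Rx and Tx queue and clear the ethdev queue
 * pointers.
 */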
void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        uint8_t i;
        struct axgbe_rx_queue *rxq;
        struct axgbe_tx_queue *txq;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq) {
                        axgbe_rx_queue_release(rxq);
                        dev->data->rx_queues[i] = NULL;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];

                if (txq) {
                        axgbe_tx_queue_release(txq);
                        dev->data->tx_queues[i] = NULL;
                }
        }
}
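
/*
 * For context: a sketch of how these callbacks would be registered in the
 * PMD's eth_dev_ops table. The table itself lives in axgbe_ethdev.c, so
 * treat the snippet below as illustrative, not as a copy of that file:
 *
 *     static const struct eth_dev_ops axgbe_eth_dev_ops = {
 *             ...
 *             .rx_queue_setup   = axgbe_dev_rx_queue_setup,
 *             .rx_queue_release = axgbe_dev_rx_queue_release,
 *             .tx_queue_setup   = axgbe_dev_tx_queue_setup,
 *             .tx_queue_release = axgbe_dev_tx_queue_release,
 *     };
 */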