dpdk.git: drivers/net/axgbe/axgbe_rxtx.c (blob at commit 6bd41d3002bf7cc0596233263555ef99a5ca56a3)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
4  */
5
6 #include "axgbe_ethdev.h"
7 #include "axgbe_rxtx.h"
8 #include "axgbe_phy.h"
9
10 #include <rte_time.h>
11 #include <rte_mempool.h>
12 #include <rte_mbuf.h>
13 #include <rte_vect.h>
14
15 static void
16 axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
17 {
18         uint16_t i;
19         struct rte_mbuf **sw_ring;
20
21         if (rx_queue) {
22                 sw_ring = rx_queue->sw_ring;
23                 if (sw_ring) {
24                         for (i = 0; i < rx_queue->nb_desc; i++) {
25                                 if (sw_ring[i])
26                                         rte_pktmbuf_free(sw_ring[i]);
27                         }
28                         rte_free(sw_ring);
29                 }
30                 rte_free(rx_queue);
31         }
32 }
33
34 void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
35 {
36         axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
37 }
38
39 int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
40                              uint16_t nb_desc, unsigned int socket_id,
41                              const struct rte_eth_rxconf *rx_conf,
42                              struct rte_mempool *mp)
43 {
44         PMD_INIT_FUNC_TRACE();
45         uint32_t size;
46         const struct rte_memzone *dma;
47         struct axgbe_rx_queue *rxq;
48         uint32_t rx_desc = nb_desc;
49         struct axgbe_port *pdata =  dev->data->dev_private;
50
51         /*
52          * Validate the Rx descriptor count: it must be a power of 2
53          * and must not exceed the maximum supported by the hardware.
54          */
55         if ((!rte_is_power_of_2(rx_desc)) ||
56             rx_desc > pdata->rx_desc_count)
57                 return -EINVAL;
58         /* First allocate the rx queue data structure */
59         rxq = rte_zmalloc_socket("ethdev RX queue",
60                                  sizeof(struct axgbe_rx_queue),
61                                  RTE_CACHE_LINE_SIZE, socket_id);
62         if (!rxq) {
63                 PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
64                 return -ENOMEM;
65         }
66
67         rxq->cur = 0;
68         rxq->dirty = 0;
69         rxq->pdata = pdata;
70         rxq->mb_pool = mp;
71         rxq->queue_id = queue_idx;
72         rxq->port_id = dev->data->port_id;
73         rxq->nb_desc = rx_desc;
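        /*
         * Each queue owns its own DMA channel register window, DMA_CH_INC
         * bytes apart starting at DMA_CH_BASE; dma_tail_reg caches the address
         * of DMA_CH_RDTR_LO, the Rx descriptor tail pointer that the receive
         * path writes to hand refilled descriptors back to the hardware.
         */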
74         rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
75                 (DMA_CH_INC * rxq->queue_id));
76         rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
77                                                   DMA_CH_RDTR_LO);
78         if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
79                 rxq->crc_len = RTE_ETHER_CRC_LEN;
80         else
81                 rxq->crc_len = 0;
82
83         /* CRC stripping in AXGBE is a per-port setting, not per-queue */
84         pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
85         rxq->free_thresh = rx_conf->rx_free_thresh ?
86                 rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
87         if (rxq->free_thresh >  rxq->nb_desc)
88                 rxq->free_thresh = rxq->nb_desc >> 3;
89
90         /* Allocate RX ring hardware descriptors */
91         size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
92         dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
93                                        socket_id);
94         if (!dma) {
95                 PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
96                 axgbe_rx_queue_release(rxq);
97                 return -ENOMEM;
98         }
99         rxq->ring_phys_addr = (uint64_t)dma->iova;
100         rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
101         memset((void *)rxq->desc, 0, size);
102         /* Allocate software ring */
103         size = rxq->nb_desc * sizeof(struct rte_mbuf *);
104         rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
105                                           RTE_CACHE_LINE_SIZE,
106                                           socket_id);
107         if (!rxq->sw_ring) {
108                 PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
109                 axgbe_rx_queue_release(rxq);
110                 return -ENOMEM;
111         }
112         dev->data->rx_queues[queue_idx] = rxq;
113         if (!pdata->rx_queues)
114                 pdata->rx_queues = dev->data->rx_queues;
115
116         return 0;
117 }
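
/*
 * Illustrative only: a typical application-side call that reaches this setup
 * handler through the ethdev layer.  The mempool, port and threshold values
 * below are hypothetical.
 *
 *	struct rte_eth_rxconf rxconf = { .rx_free_thresh = 32 };
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				     &rxconf, mbuf_pool);
 */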
118
119 static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
120                                   unsigned int queue)
121 {
122         unsigned int rx_status;
123         unsigned long rx_timeout;
124
125         /* The Rx engine cannot be stopped if it is actively processing
126          * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
127          * wait forever though...
128          */
129         rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
130                                                rte_get_timer_hz());
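        /* AXGBE_DMA_STOP_TIMEOUT is given in seconds; multiplying by
         * rte_get_timer_hz() converts it into TSC cycles so it can be compared
         * against rte_get_timer_cycles() by time_before().
         */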
131
132         while (time_before(rte_get_timer_cycles(), rx_timeout)) {
133                 rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
134                 if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
135                     (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
136                         break;
137
138                 rte_delay_us(900);
139         }
140
141         if (!time_before(rte_get_timer_cycles(), rx_timeout))
142                 PMD_DRV_LOG(ERR,
143                             "timed out waiting for Rx queue %u to empty\n",
144                             queue);
145 }
146
147 void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
148 {
149         struct axgbe_rx_queue *rxq;
150         struct axgbe_port *pdata = dev->data->dev_private;
151         unsigned int i;
152
153         /* Disable MAC Rx */
154         AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
155         AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
156         AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
157         AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
158
159         /* Prepare for Rx DMA channel stop */
160         for (i = 0; i < dev->data->nb_rx_queues; i++) {
161                 rxq = dev->data->rx_queues[i];
162                 axgbe_prepare_rx_stop(pdata, i);
163         }
164         /* Disable each Rx queue */
165         AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
166         for (i = 0; i < dev->data->nb_rx_queues; i++) {
167                 rxq = dev->data->rx_queues[i];
168                 /* Disable Rx DMA channel */
169                 AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
170         }
171 }
172
173 void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
174 {
175         struct axgbe_rx_queue *rxq;
176         struct axgbe_port *pdata = dev->data->dev_private;
177         unsigned int i;
178         unsigned int reg_val = 0;
179
180         for (i = 0; i < dev->data->nb_rx_queues; i++) {
181                 rxq = dev->data->rx_queues[i];
182                 /* Enable Rx DMA channel */
183                 AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
184         }
185
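        /*
         * MAC_RQC0R carries a 2-bit enable field per Rx queue; the loop below
         * writes 0x2 into each configured queue's field to enable it (0x2 is
         * presumably the DCB/generic-traffic encoding of that field).
         */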
186         reg_val = 0;
187         for (i = 0; i < pdata->rx_q_count; i++)
188                 reg_val |= (0x02 << (i << 1));
189         AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
190
191         /* Enable MAC Rx */
192         AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
193         /* Frames are forwarded to the application after the CRC is stripped */
194         if (pdata->crc_strip_enable) {
195                 AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
196                 AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
197         }
198         AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
199 }
200
201 /* Rx burst function: one descriptor is refreshed for each packet received */
202 uint16_t
203 axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
204                 uint16_t nb_pkts)
205 {
206         PMD_INIT_FUNC_TRACE();
207         uint16_t nb_rx = 0;
208         struct axgbe_rx_queue *rxq = rx_queue;
209         volatile union axgbe_rx_desc *desc;
210         uint64_t old_dirty = rxq->dirty;
211         struct rte_mbuf *mbuf, *tmbuf;
212         unsigned int err, etlt;
213         uint32_t error_status;
214         uint16_t idx, pidx, pkt_len;
215         uint64_t offloads;
216
217         idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
218         while (nb_rx < nb_pkts) {
219                 if (unlikely(idx == rxq->nb_desc))
220                         idx = 0;
221
222                 desc = &rxq->desc[idx];
223
224                 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
225                         break;
226                 tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
227                 if (unlikely(!tmbuf)) {
228                         PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
229                                     " queue_id = %u\n",
230                                     (unsigned int)rxq->port_id,
231                                     (unsigned int)rxq->queue_id);
232                         rte_eth_devices[
233                                 rxq->port_id].data->rx_mbuf_alloc_failed++;
234                         rxq->rx_mbuf_alloc_failed++;
235                         break;
236                 }
237                 pidx = idx + 1;
238                 if (unlikely(pidx == rxq->nb_desc))
239                         pidx = 0;
240
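                /* Prefetch the next sw_ring entry on every iteration, and the
                 * descriptor ring itself only on every fourth index, i.e.
                 * roughly once per cache line of descriptors (assuming 16-byte
                 * descriptors).
                 */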
241                 rte_prefetch0(rxq->sw_ring[pidx]);
242                 if ((pidx & 0x3) == 0) {
243                         rte_prefetch0(&rxq->desc[pidx]);
244                         rte_prefetch0(&rxq->sw_ring[pidx]);
245                 }
246
247                 mbuf = rxq->sw_ring[idx];
248                 /* Check for errors; the mbuf is freed on anything other than a checksum error */
249                 err = AXGMAC_GET_BITS_LE(desc->write.desc3,
250                                          RX_NORMAL_DESC3, ES);
251                 error_status = 0;
252                 if (unlikely(err)) {
253                         error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
254                         if ((error_status != AXGBE_L3_CSUM_ERR) &&
255                             (error_status != AXGBE_L4_CSUM_ERR)) {
256                                 rxq->errors++;
257                                 rte_pktmbuf_free(mbuf);
258                                 goto err_set;
259                         }
260                 }
261                 if (rxq->pdata->rx_csum_enable) {
262                         mbuf->ol_flags = 0;
263                         mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
264                         mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
265                         if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
266                                 mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
267                                 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
268                                 mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
269                                 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
270                         } else if (
271                                 unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
272                                 mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
273                                 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
274                         }
275                 }
276                 rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
277                 /* Get the RSS hash */
278                 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
279                         mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
280                 etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
281                                 RX_NORMAL_DESC3, ETLT);
282                 offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
283                 if (!err || !etlt) {
284                         if (etlt == RX_CVLAN_TAG_PRESENT) {
285                                 mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
286                                 mbuf->vlan_tci =
287                                         AXGMAC_GET_BITS_LE(desc->write.desc0,
288                                                         RX_NORMAL_DESC0, OVT);
289                                 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
290                                         mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
291                                 else
292                                         mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
293                         } else {
294                                 mbuf->ol_flags &=
295                                         ~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
296                                 mbuf->vlan_tci = 0;
297                         }
298                 }
299                 /* A context descriptor follows (CDA set), carrying the Rx timestamp */
300                 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
301                         mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
302                                         | RTE_MBUF_F_RX_IEEE1588_TMST;
303                 pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
304                                              PL) - rxq->crc_len;
305                 /* Mbuf populate */
306                 mbuf->next = NULL;
307                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
308                 mbuf->nb_segs = 1;
309                 mbuf->port = rxq->port_id;
310                 mbuf->pkt_len = pkt_len;
311                 mbuf->data_len = pkt_len;
312                 rxq->bytes += pkt_len;
313                 rx_pkts[nb_rx++] = mbuf;
314 err_set:
315                 rxq->cur++;
316                 rxq->sw_ring[idx++] = tmbuf;
317                 desc->read.baddr =
318                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
319                 memset((void *)(&desc->read.desc2), 0, 8);
320                 AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
321                 rxq->dirty++;
322         }
323         rxq->pkts += nb_rx;
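        /* If any descriptors were refilled, publish them to the hardware: the
         * write barrier orders the descriptor updates (including the OWN bits)
         * before the tail-pointer write, and DMA_CH_RDTR_LO is set to the
         * address of the last refilled descriptor.
         */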
324         if (rxq->dirty != old_dirty) {
325                 rte_wmb();
326                 idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
327                 AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
328                                    low32_value(rxq->ring_phys_addr +
329                                    (idx * sizeof(union axgbe_rx_desc))));
330         }
331
332         return nb_rx;
333 }
334
335
336 uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
337                 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
338 {
339         PMD_INIT_FUNC_TRACE();
340         uint16_t nb_rx = 0;
341         struct axgbe_rx_queue *rxq = rx_queue;
342         volatile union axgbe_rx_desc *desc;
343
344         uint64_t old_dirty = rxq->dirty;
345         struct rte_mbuf *first_seg = NULL;
346         struct rte_mbuf *mbuf, *tmbuf;
347         unsigned int err, etlt;
348         uint32_t error_status;
349         uint16_t idx, pidx, data_len = 0, pkt_len = 0;
350         uint64_t offloads;
351
352         idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
353         while (nb_rx < nb_pkts) {
354                 bool eop = 0;
355 next_desc:
356                 if (unlikely(idx == rxq->nb_desc))
357                         idx = 0;
358
359                 desc = &rxq->desc[idx];
360
361                 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
362                         break;
363
364                 tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
365                 if (unlikely(!tmbuf)) {
366                         PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
367                                     " queue_id = %u\n",
368                                     (unsigned int)rxq->port_id,
369                                     (unsigned int)rxq->queue_id);
370                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
371                         break;
372                 }
373
374                 pidx = idx + 1;
375                 if (unlikely(pidx == rxq->nb_desc))
376                         pidx = 0;
377
378                 rte_prefetch0(rxq->sw_ring[pidx]);
379                 if ((pidx & 0x3) == 0) {
380                         rte_prefetch0(&rxq->desc[pidx]);
381                         rte_prefetch0(&rxq->sw_ring[pidx]);
382                 }
383
384                 mbuf = rxq->sw_ring[idx];
385                 /* Check for errors; the mbuf is freed on anything other than a checksum error */
386                 err = AXGMAC_GET_BITS_LE(desc->write.desc3,
387                                          RX_NORMAL_DESC3, ES);
388                 error_status = 0;
389                 if (unlikely(err)) {
390                         error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
391                         if ((error_status != AXGBE_L3_CSUM_ERR)
392                                         && (error_status != AXGBE_L4_CSUM_ERR)) {
393                                 rxq->errors++;
394                                 rte_pktmbuf_free(mbuf);
395                                 goto err_set;
396                         }
397                 }
398                 rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
399
400                 if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
401                                         RX_NORMAL_DESC3, LD)) {
402                         eop = 0;
403                         pkt_len = rxq->buf_size;
404                         data_len = pkt_len;
405                 } else {
406                         eop = 1;
407                         pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
408                                         RX_NORMAL_DESC3, PL);
409                         data_len = pkt_len - rxq->crc_len;
410                 }
411
412                 if (first_seg != NULL) {
413                         if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
414                                 rte_mempool_put(rxq->mb_pool,
415                                                 first_seg);
416                 } else {
417                         first_seg = mbuf;
418                 }
419
420                 /* Get the RSS hash */
421                 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
422                         mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
423                 etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
424                                 RX_NORMAL_DESC3, ETLT);
425                 offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
426                 if (!err || !etlt) {
427                         if (etlt == RX_CVLAN_TAG_PRESENT) {
428                                 mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
429                                 mbuf->vlan_tci =
430                                         AXGMAC_GET_BITS_LE(desc->write.desc0,
431                                                         RX_NORMAL_DESC0, OVT);
432                                 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
433                                         mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
434                                 else
435                                         mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
436                         } else {
437                                 mbuf->ol_flags &=
438                                         ~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
439                                 mbuf->vlan_tci = 0;
440                         }
441                 }
442                 /* Mbuf populate */
443                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
444                 mbuf->data_len = data_len;
445
446 err_set:
447                 rxq->cur++;
448                 rxq->sw_ring[idx++] = tmbuf;
449                 desc->read.baddr =
450                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
451                 memset((void *)(&desc->read.desc2), 0, 8);
452                 AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
453                 rxq->dirty++;
454
455                 if (!eop) {
456                         /* The segment stays chained in first_seg, so it must not be freed here */
457                         goto next_desc;
458                 }
459
460                 first_seg->pkt_len = pkt_len;
461                 rxq->bytes += pkt_len;
462                 mbuf->next = NULL;
463
464                 first_seg->port = rxq->port_id;
465                 if (rxq->pdata->rx_csum_enable) {
466                         first_seg->ol_flags = 0; /* offload flags belong on the first segment */
467                         first_seg->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
468                         first_seg->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
469                         if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
470                                 first_seg->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
471                                 first_seg->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
472                                 first_seg->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
473                                 first_seg->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
474                         } else if (unlikely(error_status
475                                                 == AXGBE_L4_CSUM_ERR)) {
476                                 first_seg->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
477                                 first_seg->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
478                         }
479                 }
480
481                 rx_pkts[nb_rx++] = first_seg;
482
483                 /* Set up the receive context for a new packet. */
484                 first_seg = NULL;
485         }
486
487         /* Save receive context.*/
488         rxq->pkts += nb_rx;
489
490         if (rxq->dirty != old_dirty) {
491                 rte_wmb();
492                 idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
493                 AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
494                                    low32_value(rxq->ring_phys_addr +
495                                    (idx * sizeof(union axgbe_rx_desc))));
496         }
497         return nb_rx;
498 }
499
500 /* Tx APIs */
501 static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
502 {
503         uint16_t i;
504         struct rte_mbuf **sw_ring;
505
506         if (tx_queue) {
507                 sw_ring = tx_queue->sw_ring;
508                 if (sw_ring) {
509                         for (i = 0; i < tx_queue->nb_desc; i++) {
510                                 if (sw_ring[i])
511                                         rte_pktmbuf_free(sw_ring[i]);
512                         }
513                         rte_free(sw_ring);
514                 }
515                 rte_free(tx_queue);
516         }
517 }
518
519 void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
520 {
521         axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
522 }
523
524 int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
525                              uint16_t nb_desc, unsigned int socket_id,
526                              const struct rte_eth_txconf *tx_conf)
527 {
528         PMD_INIT_FUNC_TRACE();
529         uint32_t tx_desc;
530         struct axgbe_port *pdata;
531         struct axgbe_tx_queue *txq;
532         unsigned int tsize;
533         const struct rte_memzone *tz;
534         uint64_t offloads;
535
536         tx_desc = nb_desc;
537         pdata = dev->data->dev_private;
538
539         /*
540          * Validate the Tx descriptor count: it must be a power of 2, at
541          * least AXGBE_MIN_RING_DESC, and no more than the h/w maximum.
542          */
543         if ((!rte_is_power_of_2(tx_desc)) ||
544             tx_desc > pdata->tx_desc_count ||
545             tx_desc < AXGBE_MIN_RING_DESC)
546                 return -EINVAL;
547
548         /* First allocate the tx queue data structure */
549         txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
550                           RTE_CACHE_LINE_SIZE);
551         if (!txq)
552                 return -ENOMEM;
553         txq->pdata = pdata;
554         offloads = tx_conf->offloads |
555                 txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
556         txq->nb_desc = tx_desc;
557         txq->free_thresh = tx_conf->tx_free_thresh ?
558                 tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
559         if (txq->free_thresh > txq->nb_desc)
560                 txq->free_thresh = (txq->nb_desc >> 1);
561         txq->free_batch_cnt = txq->free_thresh;
562
563         /* The vector Tx path requires the ring size to be a multiple of the free threshold */
564         if (txq->nb_desc % txq->free_thresh != 0)
565                 txq->vector_disable = 1;
566
567         if (offloads != 0)
568                 txq->vector_disable = 1;
569
570         /* Allocate TX ring hardware descriptors */
571         tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
572         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
573                                       tsize, AXGBE_DESC_ALIGN, socket_id);
574         if (!tz) {
575                 axgbe_tx_queue_release(txq);
576                 return -ENOMEM;
577         }
578         memset(tz->addr, 0, tsize);
579         txq->ring_phys_addr = (uint64_t)tz->iova;
580         txq->desc = tz->addr;
581         txq->queue_id = queue_idx;
582         txq->port_id = dev->data->port_id;
583         txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
584                 (DMA_CH_INC * txq->queue_id));
585         txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
586                                                   DMA_CH_TDTR_LO);
587         txq->cur = 0;
588         txq->dirty = 0;
589         txq->nb_desc_free = txq->nb_desc;
590         /* Allocate software ring */
591         tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
592         txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
593                                    RTE_CACHE_LINE_SIZE);
594         if (!txq->sw_ring) {
595                 axgbe_tx_queue_release(txq);
596                 return -ENOMEM;
597         }
598         dev->data->tx_queues[queue_idx] = txq;
599         if (!pdata->tx_queues)
600                 pdata->tx_queues = dev->data->tx_queues;
601
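        /*
         * Select the Tx burst function for this port: the vector path is used
         * only when no Tx offloads are enabled, the ring size is a multiple of
         * the free threshold, at least 128-bit SIMD is available and the build
         * targets x86; otherwise the scalar axgbe_xmit_pkts() is used.
         */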
602         if (txq->vector_disable ||
603                         rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
604                 dev->tx_pkt_burst = &axgbe_xmit_pkts;
605         else
606 #ifdef RTE_ARCH_X86
607                 dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
608 #else
609                 dev->tx_pkt_burst = &axgbe_xmit_pkts;
610 #endif
611
612         return 0;
613 }
614
615 int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
616                 char *fw_version, size_t fw_size)
617 {
618         struct axgbe_port *pdata;
619         struct axgbe_hw_features *hw_feat;
620         int ret;
621
622         pdata = (struct axgbe_port *)eth_dev->data->dev_private;
623         hw_feat = &pdata->hw_feat;
624
625         ret = snprintf(fw_version, fw_size, "%d.%d.%d",
626                         AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
627                         AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
628                         AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
629         if (ret < 0)
630                 return -EINVAL;
631
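        /*
         * Follow the ethdev fw_version_get convention: return 0 when the
         * version string fits into the caller's buffer, otherwise return the
         * size (including the terminating '\0') that would have been needed.
         */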
632         ret += 1; /* add the size of '\0' */
633         if (fw_size < (size_t)ret)
634                 return ret;
635         else
636                 return 0;
637 }
638
639 static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
640                                       unsigned int queue)
641 {
642         unsigned int tx_status;
643         unsigned long tx_timeout;
644
645         /* The Tx engine cannot be stopped if it is actively processing
646          * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
647          * wait forever though...
648          */
649         tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
650                                                rte_get_timer_hz());
651         while (time_before(rte_get_timer_cycles(), tx_timeout)) {
652                 tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
653                 if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
654                     (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
655                         break;
656
657                 rte_delay_us(900);
658         }
659
660         if (!time_before(rte_get_timer_cycles(), tx_timeout))
661                 PMD_DRV_LOG(ERR,
662                             "timed out waiting for Tx queue %u to empty\n",
663                             queue);
664 }
665
666 static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
667                                   unsigned int queue)
668 {
669         unsigned int tx_dsr, tx_pos, tx_qidx;
670         unsigned int tx_status;
671         unsigned long tx_timeout;
672
673         if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
674                 return axgbe_txq_prepare_tx_stop(pdata, queue);
675
676         /* Calculate the status register to read and the position within */
677         if (queue < DMA_DSRX_FIRST_QUEUE) {
678                 tx_dsr = DMA_DSR0;
679                 tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
680         } else {
681                 tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
682
683                 tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
684                 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
685                         DMA_DSRX_TPS_START;
686         }
687
688         /* The Tx engine cannot be stopped if it is actively processing
689          * descriptors. Wait for the Tx engine to enter the stopped or
690          * suspended state.  Don't wait forever though...
691          */
692         tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
693                                                rte_get_timer_hz());
694         while (time_before(rte_get_timer_cycles(), tx_timeout)) {
695                 tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
696                 tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
697                 if ((tx_status == DMA_TPS_STOPPED) ||
698                     (tx_status == DMA_TPS_SUSPENDED))
699                         break;
700
701                 rte_delay_us(900);
702         }
703
704         if (!time_before(rte_get_timer_cycles(), tx_timeout))
705                 PMD_DRV_LOG(ERR,
706                             "timed out waiting for Tx DMA channel %u to stop\n",
707                             queue);
708 }
709
710 void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
711 {
712         struct axgbe_tx_queue *txq;
713         struct axgbe_port *pdata = dev->data->dev_private;
714         unsigned int i;
715
716         /* Prepare for stopping DMA channel */
717         for (i = 0; i < pdata->tx_q_count; i++) {
718                 txq = dev->data->tx_queues[i];
719                 axgbe_prepare_tx_stop(pdata, i);
720         }
721         /* Disable MAC Tx */
722         AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
723         /* Disable each Tx queue*/
724         for (i = 0; i < pdata->tx_q_count; i++)
725                 AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
726                                         0);
727         /* Disable each  Tx DMA channel */
728         for (i = 0; i < dev->data->nb_tx_queues; i++) {
729                 txq = dev->data->tx_queues[i];
730                 AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
731         }
732 }
733
734 void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
735 {
736         struct axgbe_tx_queue *txq;
737         struct axgbe_port *pdata = dev->data->dev_private;
738         unsigned int i;
739
740         for (i = 0; i < dev->data->nb_tx_queues; i++) {
741                 txq = dev->data->tx_queues[i];
742                 /* Enable Tx DMA channel */
743                 AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
744         }
745         /* Enable Tx queue*/
746         for (i = 0; i < pdata->tx_q_count; i++)
747                 AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
748                                         MTL_Q_ENABLED);
749         /* Enable MAC Tx */
750         AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
751 }
752
753 /* Free mbufs for Tx descriptors that the hardware has completed */
754 static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
755 {
756         volatile struct axgbe_tx_desc *desc;
757         uint16_t idx;
758
759         idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
760         while (txq->cur != txq->dirty) {
761                 if (unlikely(idx == txq->nb_desc))
762                         idx = 0;
763                 desc = &txq->desc[idx];
764                 /* Check for ownership */
765                 if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
766                         return;
767                 memset((void *)&desc->desc2, 0, 8);
768                 /* Free mbuf */
769                 rte_pktmbuf_free(txq->sw_ring[idx]);
770                 txq->sw_ring[idx++] = NULL;
771                 txq->dirty++;
772         }
773 }
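
/*
 * Tx ring accounting: txq->cur counts descriptors handed to the hardware and
 * txq->dirty counts descriptors already reclaimed, both as free-running
 * counters; AXGBE_GET_DESC_IDX() masks them down to ring indices, and the
 * number of free descriptors is nb_desc - (cur - dirty).
 */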
774
775 /* Tx descriptor formation.
776  * Assumes each mbuf is linear (a single segment) and therefore
777  * consumes exactly one descriptor.
778  */
779 static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
780                          struct rte_mbuf *mbuf)
781 {
782         volatile struct axgbe_tx_desc *desc;
783         uint16_t idx;
784         uint64_t mask;
785
786         idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
787         desc = &txq->desc[idx];
788
789         /* Update buffer address  and length */
790         desc->baddr = rte_mbuf_data_iova(mbuf);
791         AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
792                            mbuf->pkt_len);
793         /* Total msg length to transmit */
794         AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
795                            mbuf->pkt_len);
796         /* Timestamp enablement check */
797         if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
798                 AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
799         rte_wmb();
800         /* Mark it as First and Last Descriptor */
801         AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
802         AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
803         /* Mark it as a NORMAL descriptor */
804         AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
805         /* configure h/w Offload */
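        /* Checksum Insertion Control: CIC = 0x3 requests insertion of both the
         * IP header checksum and the L4 (TCP/UDP) checksum, while CIC = 0x1
         * requests the IP header checksum only (presumed XGMAC encoding).
         */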
806         mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
807         if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
808                 AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
809         else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
810                 AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
811         rte_wmb();
812
813         if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
814                 /* Mark it as a CONTEXT descriptor */
815                 AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
816                                   CTXT, 1);
817                 /* Set the VLAN tag */
818                 AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
819                                   VT, mbuf->vlan_tci);
820                 /* Indicate this descriptor contains the VLAN tag */
821                 AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
822                                           VLTV, 1);
823                 AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
824                                 TX_NORMAL_DESC2_VLAN_INSERT);
825         } else {
826                 AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
827         }
828         rte_wmb();
829
830         /* Set OWN bit */
831         AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
832         rte_wmb();
833
834
835         /* Save mbuf */
836         txq->sw_ring[idx] = mbuf;
837         /* Update current index*/
838         txq->cur++;
839         /* Update stats */
840         txq->bytes += mbuf->pkt_len;
841
842         return 0;
843 }
844
845 /* Tx burst function exposed to the application through the ethdev API */
846 uint16_t
847 axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
848                 uint16_t nb_pkts)
849 {
850         PMD_INIT_FUNC_TRACE();
851
852         if (unlikely(nb_pkts == 0))
853                 return nb_pkts;
854
855         struct axgbe_tx_queue *txq;
856         uint16_t nb_desc_free;
857         uint16_t nb_pkt_sent = 0;
858         uint16_t idx;
859         uint32_t tail_addr;
860         struct rte_mbuf *mbuf;
861
862         txq  = (struct axgbe_tx_queue *)tx_queue;
863         nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
864
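        /* Descriptors are reclaimed lazily: cleanup is attempted only once the
         * number of free descriptors has dropped to the free threshold.
         */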
865         if (unlikely(nb_desc_free <= txq->free_thresh)) {
866                 axgbe_xmit_cleanup(txq);
867                 nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
868                 if (unlikely(nb_desc_free == 0))
869                         return 0;
870         }
871         nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
872         while (nb_pkts--) {
873                 mbuf = *tx_pkts++;
874                 if (axgbe_xmit_hw(txq, mbuf))
875                         goto out;
876                 nb_pkt_sent++;
877         }
878 out:
879         /* Sync read and write */
880         rte_mb();
881         idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
882         tail_addr = low32_value(txq->ring_phys_addr +
883                                 idx * sizeof(struct axgbe_tx_desc));
884         /* Write the tail register with the address just past the last queued descriptor to kick the Tx DMA channel */
885         AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
886         txq->pkts += nb_pkt_sent;
887         return nb_pkt_sent;
888 }
889
890 void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
891 {
892         PMD_INIT_FUNC_TRACE();
893         uint8_t i;
894         struct axgbe_rx_queue *rxq;
895         struct axgbe_tx_queue *txq;
896
897         for (i = 0; i < dev->data->nb_rx_queues; i++) {
898                 rxq = dev->data->rx_queues[i];
899
900                 if (rxq) {
901                         axgbe_rx_queue_release(rxq);
902                         dev->data->rx_queues[i] = NULL;
903                 }
904         }
905
906         for (i = 0; i < dev->data->nb_tx_queues; i++) {
907                 txq = dev->data->tx_queues[i];
908
909                 if (txq) {
910                         axgbe_tx_queue_release(txq);
911                         dev->data->tx_queues[i] = NULL;
912                 }
913         }
914 }
915
916 int
917 axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
918 {
919         struct axgbe_rx_queue *rxq = rx_queue;
920         volatile union axgbe_rx_desc *desc;
921         uint16_t idx;
922
923
924         if (unlikely(offset >= rxq->nb_desc))
925                 return -EINVAL;
926
927         if (offset >= rxq->nb_desc - rxq->dirty)
928                 return RTE_ETH_RX_DESC_UNAVAIL;
929
930         idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
931         desc = &rxq->desc[idx + offset];
932
933         if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
934                 return RTE_ETH_RX_DESC_DONE;
935
936         return RTE_ETH_RX_DESC_AVAIL;
937 }
938
939 int
940 axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
941 {
942         struct axgbe_tx_queue *txq = tx_queue;
943         volatile struct axgbe_tx_desc *desc;
944         uint16_t idx;
945
946
947         if (unlikely(offset >= txq->nb_desc))
948                 return -EINVAL;
949
950         if (offset >= txq->nb_desc - txq->dirty)
951                 return RTE_ETH_TX_DESC_UNAVAIL;
952
953         idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
954         desc = &txq->desc[idx + offset];
955
956         if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
957                 return RTE_ETH_TX_DESC_DONE;
958
959         return RTE_ETH_TX_DESC_FULL;
960 }