remove unnecessary null checks
dpdk.git: drivers/net/axgbe/axgbe_rxtx.c
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
        uint16_t i;
        struct rte_mbuf **sw_ring;

        if (rx_queue) {
                sw_ring = rx_queue->sw_ring;
                if (sw_ring) {
                        for (i = 0; i < rx_queue->nb_desc; i++) {
                                rte_pktmbuf_free(sw_ring[i]);
                        }
                        rte_free(sw_ring);
                }
                rte_free(rx_queue);
        }
}

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
        axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}

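/* Set up an Rx queue: validate the descriptor count, allocate the queue
 * structure, the hardware descriptor ring (in a DMA-able memzone) and the
 * software mbuf ring, and record the per-queue DMA register addresses.
 */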
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                             uint16_t nb_desc, unsigned int socket_id,
                             const struct rte_eth_rxconf *rx_conf,
                             struct rte_mempool *mp)
{
        PMD_INIT_FUNC_TRACE();
        uint32_t size;
        const struct rte_memzone *dma;
        struct axgbe_rx_queue *rxq;
        uint32_t rx_desc = nb_desc;
        struct axgbe_port *pdata = dev->data->dev_private;

        /*
         * Validate the Rx descriptor count: it must be a power of 2 and
         * must not exceed the count supported by the hardware.
         */
        if ((!rte_is_power_of_2(rx_desc)) ||
            rx_desc > pdata->rx_desc_count)
                return -EINVAL;
        /* First allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("ethdev RX queue",
                                 sizeof(struct axgbe_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
                return -ENOMEM;
        }

        rxq->cur = 0;
        rxq->dirty = 0;
        rxq->pdata = pdata;
        rxq->mb_pool = mp;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
        rxq->nb_desc = rx_desc;
        rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
                (DMA_CH_INC * rxq->queue_id));
        rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
                                                  DMA_CH_RDTR_LO);
        if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;

        /* CRC stripping in AXGBE is configured per port, not per queue */
        pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
        rxq->free_thresh = rx_conf->rx_free_thresh ?
                rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
        if (rxq->free_thresh > rxq->nb_desc)
                rxq->free_thresh = rxq->nb_desc >> 3;

        /* Allocate RX ring hardware descriptors */
        size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
        dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
                                       socket_id);
        if (!dma) {
                PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
                axgbe_rx_queue_release(rxq);
                return -ENOMEM;
        }
        rxq->ring_phys_addr = (uint64_t)dma->iova;
        rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
        memset((void *)rxq->desc, 0, size);
        /* Allocate software ring */
        size = rxq->nb_desc * sizeof(struct rte_mbuf *);
        rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
                                          RTE_CACHE_LINE_SIZE,
                                          socket_id);
        if (!rxq->sw_ring) {
                PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
                axgbe_rx_queue_release(rxq);
                return -ENOMEM;
        }
        dev->data->rx_queues[queue_idx] = rxq;
        if (!pdata->rx_queues)
                pdata->rx_queues = dev->data->rx_queues;

        return 0;
}

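/* Wait (with a bounded timeout) for the Rx queue to drain its FIFO before
 * the Rx DMA channel is stopped.
 */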
static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
                                  unsigned int queue)
{
        unsigned int rx_status;
        unsigned long rx_timeout;

        /* The Rx engine cannot be stopped if it is actively processing
         * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
         * wait forever though...
         */
        rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
                                               rte_get_timer_hz());

        while (time_before(rte_get_timer_cycles(), rx_timeout)) {
                rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
                if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
                    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
                        break;

                rte_delay_us(900);
        }

        if (!time_before(rte_get_timer_cycles(), rx_timeout))
                PMD_DRV_LOG(ERR,
                            "timed out waiting for Rx queue %u to empty\n",
                            queue);
}

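/* Stop Rx: disable the MAC receiver, wait for each Rx queue to drain, then
 * disable the per-queue Rx DMA channels.
 */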
void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
        struct axgbe_rx_queue *rxq;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int i;

        /* Disable MAC Rx */
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

        /* Prepare for Rx DMA channel stop */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                axgbe_prepare_rx_stop(pdata, i);
        }
        /* Disable each Rx queue */
        AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                /* Disable Rx DMA channel */
                AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
        }
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
        struct axgbe_rx_queue *rxq;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int i;
        unsigned int reg_val = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                /* Enable Rx DMA channel */
                AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
        }

        reg_val = 0;
        for (i = 0; i < pdata->rx_q_count; i++)
                reg_val |= (0x02 << (i << 1));
        AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

        /* Enable MAC Rx */
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
        /* Frames are forwarded to the application after CRC stripping */
        if (pdata->crc_strip_enable) {
                AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
                AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
        }
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

/* Rx function: each consumed descriptor is refilled one-to-one with a
 * freshly allocated mbuf.
 */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        PMD_INIT_FUNC_TRACE();
        uint16_t nb_rx = 0;
        struct axgbe_rx_queue *rxq = rx_queue;
        volatile union axgbe_rx_desc *desc;
        uint64_t old_dirty = rxq->dirty;
        struct rte_mbuf *mbuf, *tmbuf;
        unsigned int err, etlt;
        uint32_t error_status;
        uint16_t idx, pidx, pkt_len;
        uint64_t offloads;

        idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
        while (nb_rx < nb_pkts) {
                if (unlikely(idx == rxq->nb_desc))
                        idx = 0;

                desc = &rxq->desc[idx];

                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
                        break;
                tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (unlikely(!tmbuf)) {
                        PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
                                    " queue_id = %u\n",
                                    (unsigned int)rxq->port_id,
                                    (unsigned int)rxq->queue_id);
                        rte_eth_devices[
                                rxq->port_id].data->rx_mbuf_alloc_failed++;
                        rxq->rx_mbuf_alloc_failed++;
                        break;
                }
                pidx = idx + 1;
                if (unlikely(pidx == rxq->nb_desc))
                        pidx = 0;

                rte_prefetch0(rxq->sw_ring[pidx]);
                if ((pidx & 0x3) == 0) {
                        rte_prefetch0(&rxq->desc[pidx]);
                        rte_prefetch0(&rxq->sw_ring[pidx]);
                }

                mbuf = rxq->sw_ring[idx];
                /* Check for any errors and free the mbuf */
                err = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                         RX_NORMAL_DESC3, ES);
                error_status = 0;
                if (unlikely(err)) {
                        error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
                        if ((error_status != AXGBE_L3_CSUM_ERR) &&
                            (error_status != AXGBE_L4_CSUM_ERR)) {
                                rxq->errors++;
                                rte_pktmbuf_free(mbuf);
                                goto err_set;
                        }
                }
                if (rxq->pdata->rx_csum_enable) {
                        mbuf->ol_flags = 0;
                        mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                        mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                        if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
                                mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                                mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                                mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                                mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
                        } else if (
                                unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
                                mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                                mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                        }
                }
                rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
                /* Get the RSS hash */
                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
                        mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
                etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                RX_NORMAL_DESC3, ETLT);
                offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
                if (!err || !etlt) {
                        if (etlt == RX_CVLAN_TAG_PRESENT) {
                                mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
                                mbuf->vlan_tci =
                                        AXGMAC_GET_BITS_LE(desc->write.desc0,
                                                        RX_NORMAL_DESC0, OVT);
                                if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                                        mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
                                else
                                        mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
                        } else {
                                mbuf->ol_flags &=
                                        ~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
                                mbuf->vlan_tci = 0;
                        }
                }
                /* Indicate if a Context Descriptor is next */
                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
                        mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
                                        | RTE_MBUF_F_RX_IEEE1588_TMST;
                pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
                                             PL) - rxq->crc_len;
                /* Mbuf populate */
                mbuf->next = NULL;
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->nb_segs = 1;
                mbuf->port = rxq->port_id;
                mbuf->pkt_len = pkt_len;
                mbuf->data_len = pkt_len;
                rxq->bytes += pkt_len;
                rx_pkts[nb_rx++] = mbuf;
err_set:
                rxq->cur++;
                rxq->sw_ring[idx++] = tmbuf;
                desc->read.baddr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
                memset((void *)(&desc->read.desc2), 0, 8);
                AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
                rxq->dirty++;
        }
        rxq->pkts += nb_rx;
        if (rxq->dirty != old_dirty) {
                rte_wmb();
                idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
                AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
                                   low32_value(rxq->ring_phys_addr +
                                   (idx * sizeof(union axgbe_rx_desc))));
        }

        return nb_rx;
}

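/* Scattered Rx: a packet may span several descriptors; the segments are
 * chained into one mbuf chain until a descriptor with the Last Descriptor
 * (LD) bit set is seen.
 */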
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        PMD_INIT_FUNC_TRACE();
        uint16_t nb_rx = 0;
        struct axgbe_rx_queue *rxq = rx_queue;
        volatile union axgbe_rx_desc *desc;

        uint64_t old_dirty = rxq->dirty;
        struct rte_mbuf *first_seg = NULL;
        struct rte_mbuf *mbuf, *tmbuf;
        unsigned int err, etlt;
        uint32_t error_status;
        uint16_t idx, pidx, data_len = 0, pkt_len = 0;
        uint64_t offloads;

        idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
        while (nb_rx < nb_pkts) {
                bool eop = 0;
next_desc:
                if (unlikely(idx == rxq->nb_desc))
                        idx = 0;

                desc = &rxq->desc[idx];

                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
                        break;

                tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (unlikely(!tmbuf)) {
                        PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
                                    " queue_id = %u\n",
                                    (unsigned int)rxq->port_id,
                                    (unsigned int)rxq->queue_id);
                        rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
                        break;
                }

                pidx = idx + 1;
                if (unlikely(pidx == rxq->nb_desc))
                        pidx = 0;

                rte_prefetch0(rxq->sw_ring[pidx]);
                if ((pidx & 0x3) == 0) {
                        rte_prefetch0(&rxq->desc[pidx]);
                        rte_prefetch0(&rxq->sw_ring[pidx]);
                }

                mbuf = rxq->sw_ring[idx];
                /* Check for any errors and free the mbuf */
                err = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                         RX_NORMAL_DESC3, ES);
                error_status = 0;
                if (unlikely(err)) {
                        error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
                        if ((error_status != AXGBE_L3_CSUM_ERR)
                                        && (error_status != AXGBE_L4_CSUM_ERR)) {
                                rxq->errors++;
                                rte_pktmbuf_free(mbuf);
                                goto err_set;
                        }
                }
                rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

                if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
                                        RX_NORMAL_DESC3, LD)) {
                        eop = 0;
                        pkt_len = rxq->buf_size;
                        data_len = pkt_len;
                } else {
                        eop = 1;
                        pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                        RX_NORMAL_DESC3, PL);
                        data_len = pkt_len - rxq->crc_len;
                }

                if (first_seg != NULL) {
                        if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
                                rte_mempool_put(rxq->mb_pool,
                                                first_seg);
                } else {
                        first_seg = mbuf;
                }

                /* Get the RSS hash */
                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
                        mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
                etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                RX_NORMAL_DESC3, ETLT);
                offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
                if (!err || !etlt) {
                        if (etlt == RX_CVLAN_TAG_PRESENT) {
                                mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
                                mbuf->vlan_tci =
                                        AXGMAC_GET_BITS_LE(desc->write.desc0,
                                                        RX_NORMAL_DESC0, OVT);
                                if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                                        mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
                                else
                                        mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
                        } else {
                                mbuf->ol_flags &=
                                        ~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
                                mbuf->vlan_tci = 0;
                        }
                }
                /* Mbuf populate */
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->data_len = data_len;

err_set:
                rxq->cur++;
                rxq->sw_ring[idx++] = tmbuf;
                desc->read.baddr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
                memset((void *)(&desc->read.desc2), 0, 8);
                AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
                rxq->dirty++;

                if (!eop) {
                        rte_pktmbuf_free(mbuf);
                        goto next_desc;
                }

                first_seg->pkt_len = pkt_len;
                rxq->bytes += pkt_len;
                mbuf->next = NULL;

                first_seg->port = rxq->port_id;
                if (rxq->pdata->rx_csum_enable) {
                        mbuf->ol_flags = 0;
                        mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                        mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                        if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
                                mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                                mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                                mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                                mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
                        } else if (unlikely(error_status
                                                == AXGBE_L4_CSUM_ERR)) {
                                mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                                mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                        }
                }

                rx_pkts[nb_rx++] = first_seg;

                /* Set up the receive context for a new packet. */
                first_seg = NULL;
        }

        /* Save the receive context. */
        rxq->pkts += nb_rx;

        if (rxq->dirty != old_dirty) {
                rte_wmb();
                idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
                AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
                                   low32_value(rxq->ring_phys_addr +
                                   (idx * sizeof(union axgbe_rx_desc))));
        }
        return nb_rx;
}

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
        uint16_t i;
        struct rte_mbuf **sw_ring;

        if (tx_queue) {
                sw_ring = tx_queue->sw_ring;
                if (sw_ring) {
                        for (i = 0; i < tx_queue->nb_desc; i++) {
                                rte_pktmbuf_free(sw_ring[i]);
                        }
                        rte_free(sw_ring);
                }
                rte_free(tx_queue);
        }
}

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
        axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}

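/* Set up a Tx queue: validate the descriptor count, allocate the queue
 * structure, the hardware descriptor ring and the software mbuf ring, and
 * select the scalar or vector Tx burst function.
 */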
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                             uint16_t nb_desc, unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf)
{
        PMD_INIT_FUNC_TRACE();
        uint32_t tx_desc;
        struct axgbe_port *pdata;
        struct axgbe_tx_queue *txq;
        unsigned int tsize;
        const struct rte_memzone *tz;
        uint64_t offloads;

        tx_desc = nb_desc;
        pdata = dev->data->dev_private;

        /*
         * Validate the Tx descriptor count: it must be a power of 2, must
         * not exceed the count supported by the hardware and must not be
         * below the minimum ring size.
         */
        if ((!rte_is_power_of_2(tx_desc)) ||
            tx_desc > pdata->tx_desc_count ||
            tx_desc < AXGBE_MIN_RING_DESC)
                return -EINVAL;

        /* First allocate the tx queue data structure */
        txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (!txq)
                return -ENOMEM;
        txq->pdata = pdata;
        offloads = tx_conf->offloads |
                txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
        txq->nb_desc = tx_desc;
        txq->free_thresh = tx_conf->tx_free_thresh ?
                tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
        if (txq->free_thresh > txq->nb_desc)
                txq->free_thresh = (txq->nb_desc >> 1);
        txq->free_batch_cnt = txq->free_thresh;

        /* In the vector Tx path the queue size must be a multiple of the
         * free threshold.
         */
        if (txq->nb_desc % txq->free_thresh != 0)
                txq->vector_disable = 1;

        if (offloads != 0)
                txq->vector_disable = 1;

        /* Allocate TX ring hardware descriptors */
        tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                                      tsize, AXGBE_DESC_ALIGN, socket_id);
        if (!tz) {
                axgbe_tx_queue_release(txq);
                return -ENOMEM;
        }
        memset(tz->addr, 0, tsize);
        txq->ring_phys_addr = (uint64_t)tz->iova;
        txq->desc = tz->addr;
        txq->queue_id = queue_idx;
        txq->port_id = dev->data->port_id;
        txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
                (DMA_CH_INC * txq->queue_id));
        txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
                                                  DMA_CH_TDTR_LO);
        txq->cur = 0;
        txq->dirty = 0;
        txq->nb_desc_free = txq->nb_desc;
        /* Allocate software ring */
        tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
        txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
                                   RTE_CACHE_LINE_SIZE);
        if (!txq->sw_ring) {
                axgbe_tx_queue_release(txq);
                return -ENOMEM;
        }
        dev->data->tx_queues[queue_idx] = txq;
        if (!pdata->tx_queues)
                pdata->tx_queues = dev->data->tx_queues;

        if (txq->vector_disable ||
                        rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
                dev->tx_pkt_burst = &axgbe_xmit_pkts;
        else
#ifdef RTE_ARCH_X86
                dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#else
                dev->tx_pkt_burst = &axgbe_xmit_pkts;
#endif

        return 0;
}

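/* Report the XGMAC IP version fields (user version, device ID and Synopsys
 * version) as the firmware version string.
 */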
int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
                char *fw_version, size_t fw_size)
{
        struct axgbe_port *pdata;
        struct axgbe_hw_features *hw_feat;
        int ret;

        pdata = (struct axgbe_port *)eth_dev->data->dev_private;
        hw_feat = &pdata->hw_feat;

        ret = snprintf(fw_version, fw_size, "%d.%d.%d",
                        AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
                        AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
                        AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
        if (ret < 0)
                return -EINVAL;

        ret += 1; /* add the size of '\0' */
        if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
}

static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
                                      unsigned int queue)
{
        unsigned int tx_status;
        unsigned long tx_timeout;

        /* The Tx engine cannot be stopped if it is actively processing
         * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
         * wait forever though...
         */
        tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
                                               rte_get_timer_hz());
        while (time_before(rte_get_timer_cycles(), tx_timeout)) {
                tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
                if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
                    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
                        break;

                rte_delay_us(900);
        }

        if (!time_before(rte_get_timer_cycles(), tx_timeout))
                PMD_DRV_LOG(ERR,
                            "timed out waiting for Tx queue %u to empty\n",
                            queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
                                  unsigned int queue)
{
        unsigned int tx_dsr, tx_pos, tx_qidx;
        unsigned int tx_status;
        unsigned long tx_timeout;

        if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
                return axgbe_txq_prepare_tx_stop(pdata, queue);

        /* Calculate the status register to read and the position within */
        if (queue < DMA_DSRX_FIRST_QUEUE) {
                tx_dsr = DMA_DSR0;
                tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
        } else {
                tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

                tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
                tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
                        DMA_DSRX_TPS_START;
        }

        /* The Tx engine cannot be stopped if it is actively processing
         * descriptors. Wait for the Tx engine to enter the stopped or
         * suspended state.  Don't wait forever though...
         */
        tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
                                               rte_get_timer_hz());
        while (time_before(rte_get_timer_cycles(), tx_timeout)) {
                tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
                tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
                if ((tx_status == DMA_TPS_STOPPED) ||
                    (tx_status == DMA_TPS_SUSPENDED))
                        break;

                rte_delay_us(900);
        }

        if (!time_before(rte_get_timer_cycles(), tx_timeout))
                PMD_DRV_LOG(ERR,
                            "timed out waiting for Tx DMA channel %u to stop\n",
                            queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
        struct axgbe_tx_queue *txq;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int i;

        /* Prepare for stopping DMA channel */
        for (i = 0; i < pdata->tx_q_count; i++) {
                txq = dev->data->tx_queues[i];
                axgbe_prepare_tx_stop(pdata, i);
        }
        /* Disable MAC Tx */
        AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
        /* Disable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
                                        0);
        /* Disable each Tx DMA channel */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
        }
}

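/* Start Tx: enable each Tx DMA channel, then each MTL Tx queue, and finally
 * the MAC transmitter.
 */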
void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
        struct axgbe_tx_queue *txq;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                /* Enable Tx DMA channel */
                AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
        }
        /* Enable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
                                        MTL_Q_ENABLED);
        /* Enable MAC Tx */
        AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

/* Free mbufs of Tx descriptors that the hardware has completed */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
        volatile struct axgbe_tx_desc *desc;
        uint16_t idx;

        idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
        while (txq->cur != txq->dirty) {
                if (unlikely(idx == txq->nb_desc))
                        idx = 0;
                desc = &txq->desc[idx];
                /* Check for ownership */
                if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
                        return;
                memset((void *)&desc->desc2, 0, 8);
                /* Free mbuf */
                rte_pktmbuf_free(txq->sw_ring[idx]);
                txq->sw_ring[idx++] = NULL;
                txq->dirty++;
        }
}

/* Tx descriptor formation.
 * Assumes each mbuf needs exactly one descriptor and that the
 * mbuf is linear (single segment).
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
                         struct rte_mbuf *mbuf)
{
        volatile struct axgbe_tx_desc *desc;
        uint16_t idx;
        uint64_t mask;

        idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
        desc = &txq->desc[idx];

        /* Update buffer address and length */
        desc->baddr = rte_mbuf_data_iova(mbuf);
        AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
                           mbuf->pkt_len);
        /* Total msg length to transmit */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
                           mbuf->pkt_len);
        /* Timestamp enablement check */
        if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
        rte_wmb();
        /* Mark it as First and Last Descriptor */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
        /* Mark it as a NORMAL descriptor */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
        /* Configure h/w checksum offload */
        mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
        if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
        else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
        rte_wmb();

        if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
                /* Mark it as a CONTEXT descriptor */
                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
                                  CTXT, 1);
                /* Set the VLAN tag */
                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
                                  VT, mbuf->vlan_tci);
                /* Indicate this descriptor contains the VLAN tag */
                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
                                          VLTV, 1);
                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
                                TX_NORMAL_DESC2_VLAN_INSERT);
        } else {
                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
        }
        rte_wmb();

        /* Set OWN bit */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
        rte_wmb();

        /* Save mbuf */
        txq->sw_ring[idx] = mbuf;
        /* Update current index */
        txq->cur++;
        /* Update stats */
        txq->bytes += mbuf->pkt_len;

        return 0;
}

/* Tx burst function exposed to the ethdev layer */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
{
        PMD_INIT_FUNC_TRACE();

        if (unlikely(nb_pkts == 0))
                return nb_pkts;

        struct axgbe_tx_queue *txq;
        uint16_t nb_desc_free;
        uint16_t nb_pkt_sent = 0;
        uint16_t idx;
        uint32_t tail_addr;
        struct rte_mbuf *mbuf;

        txq = (struct axgbe_tx_queue *)tx_queue;
        nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

        if (unlikely(nb_desc_free <= txq->free_thresh)) {
                axgbe_xmit_cleanup(txq);
                nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
                if (unlikely(nb_desc_free == 0))
                        return 0;
        }
        nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
        while (nb_pkts--) {
                mbuf = *tx_pkts++;
                if (axgbe_xmit_hw(txq, mbuf))
                        goto out;
                nb_pkt_sent++;
        }
out:
        /* Sync read and write */
        rte_mb();
        idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
        tail_addr = low32_value(txq->ring_phys_addr +
                                idx * sizeof(struct axgbe_tx_desc));
        /* Update the tail register with the next descriptor address to kick
         * the Tx DMA channel.
         */
        AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
        txq->pkts += nb_pkt_sent;
        return nb_pkt_sent;
}

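/* Release all configured Rx and Tx queues of the port */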
void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        uint8_t i;
        struct axgbe_rx_queue *rxq;
        struct axgbe_tx_queue *txq;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq) {
                        axgbe_rx_queue_release(rxq);
                        dev->data->rx_queues[i] = NULL;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];

                if (txq) {
                        axgbe_tx_queue_release(txq);
                        dev->data->tx_queues[i] = NULL;
                }
        }
}

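/* Report the status (done, available or unavailable) of the Rx descriptor
 * at the given offset from the current read position.
 */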
int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
        struct axgbe_rx_queue *rxq = rx_queue;
        volatile union axgbe_rx_desc *desc;
        uint16_t idx;

        if (unlikely(offset >= rxq->nb_desc))
                return -EINVAL;

        if (offset >= rxq->nb_desc - rxq->dirty)
                return RTE_ETH_RX_DESC_UNAVAIL;

        idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
        desc = &rxq->desc[idx + offset];

        if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
                return RTE_ETH_RX_DESC_DONE;

        return RTE_ETH_RX_DESC_AVAIL;
}

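/* Report whether the Tx descriptor at the given offset has been completed
 * by the hardware (DONE), is still in use (FULL) or is out of range.
 */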
int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
        struct axgbe_tx_queue *txq = tx_queue;
        volatile struct axgbe_tx_desc *desc;
        uint16_t idx;

        if (unlikely(offset >= txq->nb_desc))
                return -EINVAL;

        if (offset >= txq->nb_desc - txq->dirty)
                return RTE_ETH_TX_DESC_UNAVAIL;

        idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
        desc = &txq->desc[idx + offset];

        if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
                return RTE_ETH_TX_DESC_DONE;

        return RTE_ETH_TX_DESC_FULL;
}