net/axgbe: support VLAN
drivers/net/axgbe/axgbe_rxtx.c
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
        uint16_t i;
        struct rte_mbuf **sw_ring;

        if (rx_queue) {
                sw_ring = rx_queue->sw_ring;
                if (sw_ring) {
                        for (i = 0; i < rx_queue->nb_desc; i++) {
                                if (sw_ring[i])
                                        rte_pktmbuf_free(sw_ring[i]);
                        }
                        rte_free(sw_ring);
                }
                rte_free(rx_queue);
        }
}

void axgbe_dev_rx_queue_release(void *rxq)
{
        axgbe_rx_queue_release(rxq);
}

int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                             uint16_t nb_desc, unsigned int socket_id,
                             const struct rte_eth_rxconf *rx_conf,
                             struct rte_mempool *mp)
{
        PMD_INIT_FUNC_TRACE();
        uint32_t size;
        const struct rte_memzone *dma;
        struct axgbe_rx_queue *rxq;
        uint32_t rx_desc = nb_desc;
        struct axgbe_port *pdata = dev->data->dev_private;

        /*
         * Validate the Rx descriptor count: it must be a power of 2
         * and no larger than the h/w supported maximum.
         */
        if ((!rte_is_power_of_2(rx_desc)) ||
            rx_desc > pdata->rx_desc_count)
                return -EINVAL;
        /* First allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("ethdev RX queue",
                                 sizeof(struct axgbe_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
                return -ENOMEM;
        }

        rxq->cur = 0;
        rxq->dirty = 0;
        rxq->pdata = pdata;
        rxq->mb_pool = mp;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
        rxq->nb_desc = rx_desc;
        rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
                (DMA_CH_INC * rxq->queue_id));
        rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
                                                  DMA_CH_RDTR_LO);
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;

        /* CRC stripping in AXGBE is configured per port, not per queue */
        pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
        rxq->free_thresh = rx_conf->rx_free_thresh ?
                rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
        if (rxq->free_thresh > rxq->nb_desc)
                rxq->free_thresh = rxq->nb_desc >> 3;

        /* Allocate RX ring hardware descriptors */
        size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
        dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
                                       socket_id);
        if (!dma) {
                PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
                axgbe_rx_queue_release(rxq);
                return -ENOMEM;
        }
        rxq->ring_phys_addr = (uint64_t)dma->iova;
        rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
        memset((void *)rxq->desc, 0, size);
        /* Allocate software ring */
        size = rxq->nb_desc * sizeof(struct rte_mbuf *);
        rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
                                          RTE_CACHE_LINE_SIZE,
                                          socket_id);
        if (!rxq->sw_ring) {
                PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
                axgbe_rx_queue_release(rxq);
                return -ENOMEM;
        }
        dev->data->rx_queues[queue_idx] = rxq;
        if (!pdata->rx_queues)
                pdata->rx_queues = dev->data->rx_queues;

        return 0;
}
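
/* Sizing sketch (illustrative values, not from this file): with a h/w
 * limit of pdata->rx_desc_count == 4096, nb_desc = 1024 passes the
 * power-of-two check above, while 1000 (not a power of two) or 8192
 * (above the limit) make the setup return -EINVAL. rx_free_thresh
 * defaults to AXGBE_RX_FREE_THRESH when the caller passes 0.
 */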

static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
                                  unsigned int queue)
{
        unsigned int rx_status;
        unsigned long rx_timeout;

        /* The Rx engine cannot be stopped if it is actively processing
         * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
         * wait forever though...
         */
        rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
                                               rte_get_timer_hz());

        while (time_before(rte_get_timer_cycles(), rx_timeout)) {
                rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
                if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
                    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
                        break;

                rte_delay_us(900);
        }

        if (!time_before(rte_get_timer_cycles(), rx_timeout))
                PMD_DRV_LOG(ERR,
                            "timed out waiting for Rx queue %u to empty\n",
                            queue);
}
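
/* Polling-budget sketch: the deadline above is "now" plus
 * AXGBE_DMA_STOP_TIMEOUT seconds worth of timer cycles, e.g. with a
 * 1 GHz timer and a 1 s timeout (assumed values) that is 1e9 cycles,
 * polled in ~900 us steps, so on the order of a thousand reads of
 * MTL_Q_RQDR before the error log fires.
 */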

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
        struct axgbe_rx_queue *rxq;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int i;

        /* Disable MAC Rx */
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

        /* Prepare for Rx DMA channel stop */
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                axgbe_prepare_rx_stop(pdata, i);
        /* Disable each Rx queue */
        AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                /* Disable Rx DMA channel */
                AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
        }
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
        struct axgbe_rx_queue *rxq;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int i;
        unsigned int reg_val = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                /* Enable Rx DMA channel */
                AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
        }

        reg_val = 0;
        for (i = 0; i < pdata->rx_q_count; i++)
                reg_val |= (0x02 << (i << 1));
        AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

        /* Enable MAC Rx */
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
        /* Frames are forwarded to the application after CRC stripping */
        if (pdata->crc_strip_enable) {
                AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
                AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
        }
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
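
/* MAC_RQC0R layout note: each Rx queue owns a 2-bit field and the
 * loop above writes 0x2 into every field, e.g. for rx_q_count == 4:
 *   (0x02 << 0) | (0x02 << 2) | (0x02 << 4) | (0x02 << 6) == 0xAA.
 * Per our reading of the XGMAC spec, 0x2 routes the queue to the
 * DCB/generic receive path; disable_rx clears the whole register.
 */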

/* Rx burst function: refreshes one descriptor per received packet */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        PMD_INIT_FUNC_TRACE();
        uint16_t nb_rx = 0;
        struct axgbe_rx_queue *rxq = rx_queue;
        volatile union axgbe_rx_desc *desc;
        uint64_t old_dirty = rxq->dirty;
        struct rte_mbuf *mbuf, *tmbuf;
        unsigned int err, etlt;
        uint32_t error_status;
        uint16_t idx, pidx, pkt_len;
        uint64_t offloads;

        idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
        while (nb_rx < nb_pkts) {
                if (unlikely(idx == rxq->nb_desc))
                        idx = 0;

                desc = &rxq->desc[idx];

                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
                        break;
                tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (unlikely(!tmbuf)) {
                        PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
                                    " queue_id = %u\n",
                                    (unsigned int)rxq->port_id,
                                    (unsigned int)rxq->queue_id);
                        rte_eth_devices[
                                rxq->port_id].data->rx_mbuf_alloc_failed++;
                        rxq->rx_mbuf_alloc_failed++;
                        break;
                }
                pidx = idx + 1;
                if (unlikely(pidx == rxq->nb_desc))
                        pidx = 0;

                rte_prefetch0(rxq->sw_ring[pidx]);
                if ((pidx & 0x3) == 0) {
                        rte_prefetch0(&rxq->desc[pidx]);
                        rte_prefetch0(&rxq->sw_ring[pidx]);
                }

                mbuf = rxq->sw_ring[idx];
                /* Check for any errors and free mbuf */
                err = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                         RX_NORMAL_DESC3, ES);
                error_status = 0;
                if (unlikely(err)) {
                        error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
                        if ((error_status != AXGBE_L3_CSUM_ERR) &&
                            (error_status != AXGBE_L4_CSUM_ERR)) {
                                rxq->errors++;
                                rte_pktmbuf_free(mbuf);
                                goto err_set;
                        }
                }
                if (rxq->pdata->rx_csum_enable) {
                        mbuf->ol_flags = 0;
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
                        if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
                                mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
                                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                                mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
                                mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
                        } else if (
                                unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
                                mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
                                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                        }
                }
                rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
                /* Get the RSS hash */
                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
                        mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
                etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                RX_NORMAL_DESC3, ETLT);
                offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
                if (!err || !etlt) {
                        if (etlt == RX_CVLAN_TAG_PRESENT) {
                                mbuf->ol_flags |= PKT_RX_VLAN;
                                mbuf->vlan_tci =
                                        AXGMAC_GET_BITS_LE(desc->write.desc0,
                                                        RX_NORMAL_DESC0, OVT);
                                if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                                        mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
                                else
                                        mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
                        } else {
                                mbuf->ol_flags &=
                                        ~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
                                mbuf->vlan_tci = 0;
                        }
                }
                /* Indicate if a Context Descriptor is next */
                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
                        mbuf->ol_flags |= PKT_RX_IEEE1588_PTP
                                        | PKT_RX_IEEE1588_TMST;
                pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
                                             PL) - rxq->crc_len;
                /* Mbuf populate */
                mbuf->next = NULL;
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->nb_segs = 1;
                mbuf->port = rxq->port_id;
                mbuf->pkt_len = pkt_len;
                mbuf->data_len = pkt_len;
                rxq->bytes += pkt_len;
                rx_pkts[nb_rx++] = mbuf;
err_set:
                rxq->cur++;
                rxq->sw_ring[idx++] = tmbuf;
                desc->read.baddr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
                memset((void *)(&desc->read.desc2), 0, 8);
                AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
                rxq->dirty++;
        }
        rxq->pkts += nb_rx;
        if (rxq->dirty != old_dirty) {
                rte_wmb();
                idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
                AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
                                   low32_value(rxq->ring_phys_addr +
                                   (idx * sizeof(union axgbe_rx_desc))));
        }

        return nb_rx;
}
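
/* Doorbell note: the tail register is written only when descriptors
 * were actually refilled (dirty advanced), and it points at the last
 * refilled descriptor; the rte_wmb() above orders the OWN-bit stores
 * before the doorbell so the DMA engine never consumes a descriptor
 * whose address field is still being written.
 */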

uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        PMD_INIT_FUNC_TRACE();
        uint16_t nb_rx = 0;
        struct axgbe_rx_queue *rxq = rx_queue;
        volatile union axgbe_rx_desc *desc;

        uint64_t old_dirty = rxq->dirty;
        struct rte_mbuf *first_seg = NULL;
        struct rte_mbuf *mbuf, *tmbuf;
        unsigned int err, etlt;
        uint32_t error_status;
        uint16_t idx, pidx, data_len = 0, pkt_len = 0;
        uint64_t offloads;

        idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
        while (nb_rx < nb_pkts) {
                bool eop = 0;
next_desc:
                if (unlikely(idx == rxq->nb_desc))
                        idx = 0;

                desc = &rxq->desc[idx];

                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
                        break;

                tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (unlikely(!tmbuf)) {
                        PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
                                    " queue_id = %u\n",
                                    (unsigned int)rxq->port_id,
                                    (unsigned int)rxq->queue_id);
                        rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
                        break;
                }

                pidx = idx + 1;
                if (unlikely(pidx == rxq->nb_desc))
                        pidx = 0;

                rte_prefetch0(rxq->sw_ring[pidx]);
                if ((pidx & 0x3) == 0) {
                        rte_prefetch0(&rxq->desc[pidx]);
                        rte_prefetch0(&rxq->sw_ring[pidx]);
                }

                mbuf = rxq->sw_ring[idx];
                /* Check for any errors and free mbuf */
                err = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                         RX_NORMAL_DESC3, ES);
                error_status = 0;
                if (unlikely(err)) {
                        error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
                        if ((error_status != AXGBE_L3_CSUM_ERR)
                                        && (error_status != AXGBE_L4_CSUM_ERR)) {
                                rxq->errors++;
                                rte_pktmbuf_free(mbuf);
                                goto err_set;
                        }
                }
                rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

                if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
                                        RX_NORMAL_DESC3, LD)) {
                        eop = 0;
                        pkt_len = rxq->buf_size;
                        data_len = pkt_len;
                } else {
                        eop = 1;
                        pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                        RX_NORMAL_DESC3, PL);
                        data_len = pkt_len - rxq->crc_len;
                }

                if (first_seg != NULL) {
                        if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
                                rte_mempool_put(rxq->mb_pool,
                                                first_seg);
                } else {
                        first_seg = mbuf;
                }

                /* Get the RSS hash */
                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
                        mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
                etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
                                RX_NORMAL_DESC3, ETLT);
                offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
                if (!err || !etlt) {
                        if (etlt == RX_CVLAN_TAG_PRESENT) {
                                mbuf->ol_flags |= PKT_RX_VLAN;
                                mbuf->vlan_tci =
                                        AXGMAC_GET_BITS_LE(desc->write.desc0,
                                                        RX_NORMAL_DESC0, OVT);
                                if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                                        mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
                                else
                                        mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
                        } else {
                                mbuf->ol_flags &=
                                        ~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
                                mbuf->vlan_tci = 0;
                        }
                }
                /* Mbuf populate */
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->data_len = data_len;

err_set:
                rxq->cur++;
                rxq->sw_ring[idx++] = tmbuf;
                desc->read.baddr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
                memset((void *)(&desc->read.desc2), 0, 8);
                AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
                rxq->dirty++;

                if (!eop) {
                        rte_pktmbuf_free(mbuf);
                        goto next_desc;
                }

                first_seg->pkt_len = pkt_len;
                rxq->bytes += pkt_len;
                mbuf->next = NULL;

                first_seg->port = rxq->port_id;
                if (rxq->pdata->rx_csum_enable) {
                        mbuf->ol_flags = 0;
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
                        if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
                                mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
                                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                                mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
                                mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
                        } else if (unlikely(error_status
                                                == AXGBE_L4_CSUM_ERR)) {
                                mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
                                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                        }
                }

                rx_pkts[nb_rx++] = first_seg;

                /* Set up the receive context for a new packet. */
                first_seg = NULL;
        }

        /* Save receive context. */
        rxq->pkts += nb_rx;

        if (rxq->dirty != old_dirty) {
                rte_wmb();
                idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
                AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
                                   low32_value(rxq->ring_phys_addr +
                                   (idx * sizeof(union axgbe_rx_desc))));
        }
        return nb_rx;
}
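
/* Scatter-path recap: descriptors without the LD bit contribute
 * rxq->buf_size bytes and loop back via next_desc; the LD descriptor
 * closes the chain, the CRC length is deducted from the last
 * segment's data_len, and the assembled chain is returned as a
 * single rx_pkts[] entry.
 */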

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
        uint16_t i;
        struct rte_mbuf **sw_ring;

        if (tx_queue) {
                sw_ring = tx_queue->sw_ring;
                if (sw_ring) {
                        for (i = 0; i < tx_queue->nb_desc; i++) {
                                if (sw_ring[i])
                                        rte_pktmbuf_free(sw_ring[i]);
                        }
                        rte_free(sw_ring);
                }
                rte_free(tx_queue);
        }
}

void axgbe_dev_tx_queue_release(void *txq)
{
        axgbe_tx_queue_release(txq);
}

int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                             uint16_t nb_desc, unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf)
{
        PMD_INIT_FUNC_TRACE();
        uint32_t tx_desc;
        struct axgbe_port *pdata;
        struct axgbe_tx_queue *txq;
        unsigned int tsize;
        const struct rte_memzone *tz;
        uint64_t offloads;

        tx_desc = nb_desc;
        pdata = dev->data->dev_private;

        /*
         * Validate the Tx descriptor count: it must be a power of 2,
         * no larger than the h/w supported maximum, and at least
         * AXGBE_MIN_RING_DESC.
         */
        if ((!rte_is_power_of_2(tx_desc)) ||
            tx_desc > pdata->tx_desc_count ||
            tx_desc < AXGBE_MIN_RING_DESC)
                return -EINVAL;

        /* First allocate the tx queue data structure */
        txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (!txq)
                return -ENOMEM;
        txq->pdata = pdata;
        offloads = tx_conf->offloads |
                txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
        txq->nb_desc = tx_desc;
        txq->free_thresh = tx_conf->tx_free_thresh ?
                tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
        if (txq->free_thresh > txq->nb_desc)
                txq->free_thresh = (txq->nb_desc >> 1);
        txq->free_batch_cnt = txq->free_thresh;

        /* For the vector Tx path, the queue size must be a multiple of
         * the free threshold
         */
        if (txq->nb_desc % txq->free_thresh != 0)
                txq->vector_disable = 1;

        if (offloads != 0)
                txq->vector_disable = 1;

        /* Allocate TX ring hardware descriptors */
        tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                                      tsize, AXGBE_DESC_ALIGN, socket_id);
        if (!tz) {
                axgbe_tx_queue_release(txq);
                return -ENOMEM;
        }
        memset(tz->addr, 0, tsize);
        txq->ring_phys_addr = (uint64_t)tz->iova;
        txq->desc = tz->addr;
        txq->queue_id = queue_idx;
        txq->port_id = dev->data->port_id;
        txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
                (DMA_CH_INC * txq->queue_id));
        txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
                                                  DMA_CH_TDTR_LO);
        txq->cur = 0;
        txq->dirty = 0;
        txq->nb_desc_free = txq->nb_desc;
        /* Allocate software ring */
        tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
        txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
                                   RTE_CACHE_LINE_SIZE);
        if (!txq->sw_ring) {
                axgbe_tx_queue_release(txq);
                return -ENOMEM;
        }
        dev->data->tx_queues[queue_idx] = txq;
        if (!pdata->tx_queues)
                pdata->tx_queues = dev->data->tx_queues;

        if (txq->vector_disable ||
                        rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
                dev->tx_pkt_burst = &axgbe_xmit_pkts;
        else
#ifdef RTE_ARCH_X86
                dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#else
                dev->tx_pkt_burst = &axgbe_xmit_pkts;
#endif

        return 0;
}
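
/* Burst-function selection note: the vector Tx path is used only when
 * no Tx offloads are requested, nb_desc is a multiple of free_thresh,
 * the EAL reports at least 128-bit SIMD, and the build targets x86;
 * any other combination falls back to the scalar axgbe_xmit_pkts().
 */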

static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
                                      unsigned int queue)
{
        unsigned int tx_status;
        unsigned long tx_timeout;

        /* The Tx engine cannot be stopped if it is actively processing
         * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
         * wait forever though...
         */
        tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
                                               rte_get_timer_hz());
        while (time_before(rte_get_timer_cycles(), tx_timeout)) {
                tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
                if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
                    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
                        break;

                rte_delay_us(900);
        }

        if (!time_before(rte_get_timer_cycles(), tx_timeout))
                PMD_DRV_LOG(ERR,
                            "timed out waiting for Tx queue %u to empty\n",
                            queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
                                  unsigned int queue)
{
        unsigned int tx_dsr, tx_pos, tx_qidx;
        unsigned int tx_status;
        unsigned long tx_timeout;

        if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
                return axgbe_txq_prepare_tx_stop(pdata, queue);

        /* Calculate the status register to read and the position within */
        if (queue < DMA_DSRX_FIRST_QUEUE) {
                tx_dsr = DMA_DSR0;
                tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
        } else {
                tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

                tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
                tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
                        DMA_DSRX_TPS_START;
        }

        /* The Tx engine cannot be stopped if it is actively processing
         * descriptors. Wait for the Tx engine to enter the stopped or
         * suspended state.  Don't wait forever though...
         */
        tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
                                               rte_get_timer_hz());
        while (time_before(rte_get_timer_cycles(), tx_timeout)) {
                tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
                tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
                if ((tx_status == DMA_TPS_STOPPED) ||
                    (tx_status == DMA_TPS_SUSPENDED))
                        break;

                rte_delay_us(900);
        }

        if (!time_before(rte_get_timer_cycles(), tx_timeout))
                PMD_DRV_LOG(ERR,
                            "timed out waiting for Tx DMA channel %u to stop\n",
                            queue);
}
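
/* DSR mapping sketch: DMA_DSR0 carries the first DMA_DSRX_FIRST_QUEUE
 * channels at DMA_DSR_Q_WIDTH-bit strides from DMA_DSR0_TPS_START;
 * later channels spill into DMA_DSR1.., DMA_DSRX_QPR channels per
 * register. E.g. (hypothetical constants) with FIRST_QUEUE == 3 and
 * QPR == 4, queue 5 reads DMA_DSR1 with tx_qidx == 2.
 */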

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
        struct axgbe_tx_queue *txq;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int i;

        /* Prepare for stopping DMA channel */
        for (i = 0; i < pdata->tx_q_count; i++)
                axgbe_prepare_tx_stop(pdata, i);
        /* Disable MAC Tx */
        AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
        /* Disable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
                                        0);
        /* Disable each Tx DMA channel */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
        }
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
        struct axgbe_tx_queue *txq;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                /* Enable Tx DMA channel */
                AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
        }
        /* Enable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
                                        MTL_Q_ENABLED);
        /* Enable MAC Tx */
        AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
/* Free mbufs whose transmission the hardware has completed */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
        volatile struct axgbe_tx_desc *desc;
        uint16_t idx;

        idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
        while (txq->cur != txq->dirty) {
                if (unlikely(idx == txq->nb_desc))
                        idx = 0;
                desc = &txq->desc[idx];
                /* Check for ownership */
                if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
                        return;
                memset((void *)&desc->desc2, 0, 8);
                /* Free mbuf */
                rte_pktmbuf_free(txq->sw_ring[idx]);
                txq->sw_ring[idx++] = NULL;
                txq->dirty++;
        }
}
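
/* Completion note: cleanup walks from dirty towards cur and stops at
 * the first descriptor the hardware still owns (OWN set); everything
 * before it is known transmitted, so mbufs are freed and dirty is
 * advanced without any separate completion queue.
 */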

/* Tx descriptor formation
 * Assumes each mbuf is linear and requires exactly one descriptor
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
                         struct rte_mbuf *mbuf)
{
        volatile struct axgbe_tx_desc *desc;
        uint16_t idx;
        uint64_t mask;

        idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
        desc = &txq->desc[idx];

        /* Update buffer address and length */
        desc->baddr = rte_mbuf_data_iova(mbuf);
        AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
                           mbuf->pkt_len);
        /* Total msg length to transmit */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
                           mbuf->pkt_len);
        /* Timestamp enablement check */
        if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST)
                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
        rte_wmb();
        /* Mark it as First and Last Descriptor */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
        /* Mark it as a NORMAL descriptor */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
        /* Configure h/w offload */
        mask = mbuf->ol_flags & PKT_TX_L4_MASK;
        if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
        else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
        rte_wmb();

        if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
                /* Mark it as a CONTEXT descriptor */
                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
                                   CTXT, 1);
                /* Set the VLAN tag */
                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
                                   VT, mbuf->vlan_tci);
                /* Indicate this descriptor contains the VLAN tag */
                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
                                   VLTV, 1);
                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
                                   TX_NORMAL_DESC2_VLAN_INSERT);
        } else {
                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
        }
        rte_wmb();

        /* Set OWN bit */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
        rte_wmb();

        /* Save mbuf */
        txq->sw_ring[idx] = mbuf;
        /* Update current index */
        txq->cur++;
        /* Update stats */
        txq->bytes += mbuf->pkt_len;

        return 0;
}
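
/* VLAN insertion recap (the core of the VLAN Tx support): for mbufs
 * flagged PKT_TX_VLAN_PKT/PKT_TX_QINQ_PKT the descriptor is marked as
 * a context descriptor (CTXT), the tag is placed in VT with VLTV set,
 * and VTIR requests insertion on transmit. A caller would roughly do
 * (illustrative values):
 *   mbuf->ol_flags |= PKT_TX_VLAN_PKT;
 *   mbuf->vlan_tci = 100;
 * before handing the mbuf to the Tx burst function.
 */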

/* EAL-facing Tx burst wrapper */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
{
        PMD_INIT_FUNC_TRACE();

        if (unlikely(nb_pkts == 0))
                return nb_pkts;

        struct axgbe_tx_queue *txq;
        uint16_t nb_desc_free;
        uint16_t nb_pkt_sent = 0;
        uint16_t idx;
        uint32_t tail_addr;
        struct rte_mbuf *mbuf;

        txq = (struct axgbe_tx_queue *)tx_queue;
        nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

        if (unlikely(nb_desc_free <= txq->free_thresh)) {
                axgbe_xmit_cleanup(txq);
                nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
                if (unlikely(nb_desc_free == 0))
                        return 0;
        }
        nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
        while (nb_pkts--) {
                mbuf = *tx_pkts++;
                if (axgbe_xmit_hw(txq, mbuf))
                        goto out;
                nb_pkt_sent++;
        }
out:
        /* Sync read and write */
        rte_mb();
        idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
        tail_addr = low32_value(txq->ring_phys_addr +
                                idx * sizeof(struct axgbe_tx_desc));
        /* Update tail reg with next immediate address to kick Tx DMA channel */
        AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
        txq->pkts += nb_pkt_sent;
        return nb_pkt_sent;
}
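
/* Usage sketch (assuming a started port and configured Tx queue):
 *   uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 * ethdev dispatches to dev->tx_pkt_burst, i.e. this function on the
 * scalar path; packets beyond the free descriptor budget are simply
 * not consumed, and the caller retries or frees them based on the
 * return value.
 */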

void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        uint8_t i;
        struct axgbe_rx_queue *rxq;
        struct axgbe_tx_queue *txq;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

                if (rxq) {
                        axgbe_rx_queue_release(rxq);
                        dev->data->rx_queues[i] = NULL;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];

                if (txq) {
                        axgbe_tx_queue_release(txq);
                        dev->data->tx_queues[i] = NULL;
                }
        }
}

int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
        struct axgbe_rx_queue *rxq = rx_queue;
        volatile union axgbe_rx_desc *desc;
        uint16_t idx;

        if (unlikely(offset >= rxq->nb_desc))
                return -EINVAL;

        if (offset >= rxq->nb_desc - rxq->dirty)
                return RTE_ETH_RX_DESC_UNAVAIL;

        idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
        desc = &rxq->desc[idx + offset];

        if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
                return RTE_ETH_RX_DESC_DONE;

        return RTE_ETH_RX_DESC_AVAIL;
}

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
        struct axgbe_tx_queue *txq = tx_queue;
        volatile struct axgbe_tx_desc *desc;
        uint16_t idx;

        if (unlikely(offset >= txq->nb_desc))
                return -EINVAL;

        if (offset >= txq->nb_desc - txq->dirty)
                return RTE_ETH_TX_DESC_UNAVAIL;

        idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
        desc = &txq->desc[idx + offset];

        if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
                return RTE_ETH_TX_DESC_DONE;

        return RTE_ETH_TX_DESC_FULL;
}
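
/* Status-query note: rte_eth_rx_descriptor_status() and
 * rte_eth_tx_descriptor_status() route here. DONE means the h/w has
 * handed the descriptor back (OWN cleared); AVAIL/FULL that it is
 * still owned by the h/w; UNAVAIL that the offset points past the
 * currently refilled window.
 */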