/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"

#include <rte_mempool.h>
#include <rte_mbuf.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	struct rte_mbuf **sw_ring;
	uint16_t i;

	if (!rx_queue)
		return;

	sw_ring = rx_queue->sw_ring;
	if (sw_ring) {
		for (i = 0; i < rx_queue->nb_desc; i++)
			rte_pktmbuf_free(sw_ring[i]);
		rte_free(sw_ring);
	}
	rte_free(rx_queue);
}

void axgbe_dev_rx_queue_release(void *rxq)
{
	axgbe_rx_queue_release(rxq);
}

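/* Set up an Rx queue: validate the descriptor count, allocate the queue
 * structure, the hardware descriptor ring (DMA memzone) and the software
 * mbuf ring, and record the per-channel DMA register addresses.
 */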
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * validate Rx descriptors count
	 * should be power of 2 and less than h/w supported
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->cur = 0;
	rxq->dirty = 0;
	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
	rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
		(DMA_CH_INC * rxq->queue_id));
	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
						  DMA_CH_RDTR_LO);

	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* CRC strip in AXGBE supports per port not per queue */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	/* Allocate RX ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	rxq->ring_phys_addr = (uint64_t)dma->iova;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);

	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}

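/* Poll the MTL Rx queue status until the packet count and queue state
 * report empty, or give up after AXGBE_DMA_STOP_TIMEOUT.
 */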
static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
	 * wait forever though...
	 */
	rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());

	while (time_before(rte_get_timer_cycles(), rx_timeout)) {
		rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), rx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Disable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		axgbe_prepare_rx_stop(pdata, i);
	}
	/* Disable each Rx queue */
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Disable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
	}
}

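/* Enable Rx in the reverse order of the disable path: start the Rx DMA
 * channels first, then enable the MTL Rx queues, and finally the MAC
 * receiver (with CRC stripping only when it is enabled for the port).
 */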
void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;
	unsigned int reg_val = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Enable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
	}

	/* Enable each MTL Rx queue */
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	/* Frame is forwarded after stripping CRC to application*/
	if (pdata->crc_strip_enable) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

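/* Note: the Rx burst handlers below are installed as the port's
 * rx_pkt_burst callback (set up in the ethdev init code) and are normally
 * reached through the ethdev API, e.g. (illustrative only):
 *
 *	nb = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_BURST);
 *
 * where MAX_BURST and the mbuf array are owned by the application.
 */
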
/* Rx function one to one refresh */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err;
	uint32_t error_status;
	uint16_t idx, pidx, pkt_len;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];
		/* Stop if hardware still owns the descriptor */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;
		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[
				rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_mbuf_alloc_failed++;
			break;
		}
		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free mbuf*/
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
			} else if (
				unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
					     PL) - rxq->crc_len;
		/* Mbuf populate */
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;
		mbuf->pkt_len = pkt_len;
		mbuf->data_len = pkt_len;
		rxq->bytes += pkt_len;
		rx_pkts[nb_rx++] = mbuf;
err_set:
		/* Refill the ring slot with the new mbuf and return the
		 * descriptor to hardware.
		 */
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;
	}
	rxq->pkts += nb_rx;
	if (rxq->dirty != old_dirty) {
		rte_wmb();
		/* Update the Rx tail pointer with the last refilled
		 * descriptor to kick the Rx DMA channel.
		 */
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}
	return nb_rx;
}

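/* Scattered Rx: a packet larger than the mbuf data room spans several
 * descriptors. Segments are chained with rte_pktmbuf_chain() until the
 * descriptor with the LD (last descriptor) bit is seen; only that last
 * descriptor carries the total packet length (PL field).
 */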
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;

	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *first_seg = NULL;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err;
	uint32_t error_status;
	uint16_t idx, pidx, data_len = 0, pkt_len = 0;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		bool eop = 0;
next_desc:
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;

		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free mbuf*/
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR)
					&& (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

		if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, LD)) {
			eop = 0;
			pkt_len = rxq->buf_size;
			data_len = pkt_len;
		} else {
			eop = 1;
			pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
						     RX_NORMAL_DESC3, PL);
			data_len = pkt_len - rxq->crc_len;
		}

		if (first_seg != NULL) {
			if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
				rte_mempool_put(rxq->mb_pool,
						first_seg);
		} else {
			first_seg = mbuf;
		}

		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);

		/* Mbuf populate */
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->data_len = data_len;

err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;

		if (!eop) {
			rte_pktmbuf_free(mbuf);
			goto next_desc;
		}

		first_seg->pkt_len = pkt_len;
		rxq->bytes += pkt_len;

		first_seg->port = rxq->port_id;
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status
					    == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}

		rx_pkts[nb_rx++] = first_seg;

		/* Setup receipt context for a new packet.*/
		first_seg = NULL;
	}

	/* Save receive context.*/
	rxq->pkts += nb_rx;

	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}
	return nb_rx;
}

static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	struct rte_mbuf **sw_ring;
	uint16_t i;

	if (!tx_queue)
		return;

	sw_ring = tx_queue->sw_ring;
	if (sw_ring) {
		for (i = 0; i < tx_queue->nb_desc; i++)
			rte_pktmbuf_free(sw_ring[i]);
		rte_free(sw_ring);
	}
	rte_free(tx_queue);
}

void axgbe_dev_tx_queue_release(void *txq)
{
	axgbe_tx_queue_release(txq);
}

int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;

	tx_desc = nb_desc;
	pdata = dev->data->dev_private;

	/*
	 * validate tx descriptors count
	 * should be power of 2 and less than h/w supported
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;

	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;

	/* In vector_tx path threshold should be multiple of queue_size*/
	if (txq->nb_desc % txq->free_thresh != 0)
		txq->vector_disable = 1;

	if (tx_conf->offloads != 0)
		txq->vector_disable = 1;

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->iova;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
		(DMA_CH_INC * txq->queue_id));
	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->cur = 0;
	txq->dirty = 0;
	txq->nb_desc_free = txq->nb_desc;
	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

	if (txq->vector_disable)
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
	else
#ifdef RTE_ARCH_X86
		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#else
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
#endif

	return 0;
}

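/* Wait for the MTL Tx queue to drain its FIFO before the Tx DMA channel
 * is stopped; bail out after AXGBE_DMA_STOP_TIMEOUT.
 */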
static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
				      unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
	 * wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx queue %u to empty\n",
			    queue);
}

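/* On newer MAC versions (SNPSVER > 0x20) the MTL queue status above is
 * sufficient; on older ones the per-channel Tx state is read from the
 * DMA debug status registers (DMA_DSR0/DMA_DSRx) instead.
 */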
static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return axgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Prepare for stopping DMA channel */
	for (i = 0; i < pdata->tx_q_count; i++) {
		txq = dev->data->tx_queues[i];
		axgbe_prepare_tx_stop(pdata, i);
	}
	/* Disable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
	/* Disable each Tx queue*/
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_DISABLED);
	/* Disable each Tx DMA channel */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
	}
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Enable Tx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
	}
	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_ENABLED);
	/* Enable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

/* Free Tx conformed mbufs */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free mbuf */
		rte_pktmbuf_free(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Tx Descriptor formation
 * Considering each mbuf requires one desc
 * mbuf is linear
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
			 struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];

	/* Update buffer address and length */
	desc->baddr = rte_mbuf_data_iova(mbuf);
	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
			   mbuf->pkt_len);
	/* Total msg length to transmit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
			   mbuf->pkt_len);
	/* Mark it as First and Last Descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	/* configure h/w Offload */
	mask = mbuf->ol_flags & PKT_TX_L4_MASK;
	if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

	/* Set OWN bit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
	rte_wmb();

	/* Save mbuf */
	txq->sw_ring[idx] = mbuf;
	/* Update current index*/
	txq->cur++;
	/* Update stats */
	txq->bytes += mbuf->pkt_len;

	return 0;
}

/* Eal supported tx wrapper*/
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf;

	txq = (struct axgbe_tx_queue *)tx_queue;
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}
	nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
	while (nb_pkts--) {
		mbuf = *tx_pkts++;
		if (axgbe_xmit_hw(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update tail reg with next immediate address to kick Tx DMA channel*/
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}
}

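/* Descriptor status helpers: a descriptor whose OWN bit has been cleared
 * by hardware is reported as DONE; one still owned by hardware is AVAIL
 * (Rx) or FULL (Tx); offsets beyond the outstanding range are UNAVAIL.
 */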
int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= rxq->nb_desc))
		return -EINVAL;

	if (offset >= rxq->nb_desc - rxq->dirty)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct axgbe_tx_queue *txq = tx_queue;
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	if (offset >= txq->nb_desc - txq->dirty)
		return RTE_ETH_TX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
	desc = &txq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}