net/enetfec: support Rx/Tx
authorApeksha Gupta <apeksha.gupta@nxp.com>
Mon, 15 Nov 2021 07:19:39 +0000 (12:49 +0530)
committerFerruh Yigit <ferruh.yigit@intel.com>
Mon, 15 Nov 2021 15:26:17 +0000 (16:26 +0100)
This patch adds burst enqueue and dequeue operations to the enetfec
PMD, along with basic features such as promiscuous mode enable and
basic statistics.
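
For illustration, a minimal sketch of how an application exercises the new
burst handlers once the vdev is probed (port setup, queue 0 and BURST_SIZE
are assumptions for the example, not part of this patch):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    /* Poll Rx queue 0 of the enetfec port and echo the frames back. */
    static void
    echo_loop(uint16_t port_id)
    {
            struct rte_mbuf *pkts[BURST_SIZE];
            uint16_t nb_rx, nb_tx;

            for (;;) {
                    /* dispatches to enetfec_recv_pkts() */
                    nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
                    if (nb_rx == 0)
                            continue;
                    /* dispatches to enetfec_xmit_pkts() */
                    nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
                    while (nb_tx < nb_rx)
                            rte_pktmbuf_free(pkts[nb_tx++]);
            }
    }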

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
doc/guides/nics/enetfec.rst
doc/guides/nics/features/enetfec.ini
drivers/net/enetfec/enet_ethdev.c
drivers/net/enetfec/enet_ethdev.h
drivers/net/enetfec/enet_rxtx.c [new file with mode: 0644]
drivers/net/enetfec/meson.build

index 088e122..09d0b88 100644 (file)
@@ -84,6 +84,8 @@ net_enetfec is logical Ethernet interface, created by ENETFEC driver.
 ENETFEC Features
 ----------------
 
+- Basic stats
+- Promiscuous
 - Linux
 - ARMv8
 
index bdfbdbd..3d8aa5b 100644 (file)
@@ -4,6 +4,8 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Promiscuous mode    = Y
+Basic stats         = Y
 Linux               = Y
 ARMv8               = Y
 Usage doc           = Y
index 8ea8bd2..ea77a96 100644 (file)
@@ -30,6 +30,8 @@
 #define ENETFEC_RAFL_V                 0x8
 #define ENETFEC_OPD_V                  0xFFF0
 
+/* Extended buffer descriptor: 0 selects the legacy descriptor format */
+#define ENETFEC_EXTENDED_BD            0
 #define NUM_OF_BD_QUEUES               6
 
 /* Supported Rx offloads */
@@ -143,6 +145,38 @@ enetfec_restart(struct rte_eth_dev *dev)
        rte_delay_us(10);
 }
 
+static void
+enet_free_buffers(struct rte_eth_dev *dev)
+{
+       struct enetfec_private *fep = dev->data->dev_private;
+       unsigned int i, q;
+       struct rte_mbuf *mbuf;
+       struct bufdesc  *bdp;
+       struct enetfec_priv_rx_q *rxq;
+       struct enetfec_priv_tx_q *txq;
+
+       for (q = 0; q < dev->data->nb_rx_queues; q++) {
+               rxq = fep->rx_queues[q];
+               bdp = rxq->bd.base;
+               for (i = 0; i < rxq->bd.ring_size; i++) {
+                       mbuf = rxq->rx_mbuf[i];
+                       rxq->rx_mbuf[i] = NULL;
+                       rte_pktmbuf_free(mbuf);
+                       bdp = enet_get_nextdesc(bdp, &rxq->bd);
+               }
+       }
+
+       for (q = 0; q < dev->data->nb_tx_queues; q++) {
+               txq = fep->tx_queues[q];
+               bdp = txq->bd.base;
+               for (i = 0; i < txq->bd.ring_size; i++) {
+                       mbuf = txq->tx_mbuf[i];
+                       txq->tx_mbuf[i] = NULL;
+                       rte_pktmbuf_free(mbuf);
+               }
+       }
+}
+
 static int
 enetfec_eth_configure(struct rte_eth_dev *dev)
 {
@@ -156,6 +190,8 @@ static int
 enetfec_eth_start(struct rte_eth_dev *dev)
 {
        enetfec_restart(dev);
+       dev->rx_pkt_burst = &enetfec_recv_pkts;
+       dev->tx_pkt_burst = &enetfec_xmit_pkts;
 
        return 0;
 }
@@ -182,6 +218,101 @@ enetfec_eth_stop(struct rte_eth_dev *dev)
        return 0;
 }
 
+static int
+enetfec_eth_close(struct rte_eth_dev *dev)
+{
+       enet_free_buffers(dev);
+       return 0;
+}
+
+static int
+enetfec_eth_link_update(struct rte_eth_dev *dev,
+                       int wait_to_complete __rte_unused)
+{
+       struct rte_eth_link link;
+       unsigned int lstatus = 1; /* link is always reported as up */
+
+       memset(&link, 0, sizeof(struct rte_eth_link));
+
+       link.link_status = lstatus;
+       link.link_speed = RTE_ETH_SPEED_NUM_1G;
+
+       ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+                        "Up");
+
+       return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+enetfec_promiscuous_enable(struct rte_eth_dev *dev)
+{
+       struct enetfec_private *fep = dev->data->dev_private;
+       uint32_t tmp;
+
+       tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+       tmp |= 0x8;     /* set PROM to accept all frames */
+       tmp &= ~0x2;    /* clear DRT (disable receive on transmit) */
+       rte_write32(rte_cpu_to_le_32(tmp),
+               (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+
+       return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+       struct enetfec_private *fep = dev->data->dev_private;
+
+       rte_write32(rte_cpu_to_le_32(0xffffffff),
+                       (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+       rte_write32(rte_cpu_to_le_32(0xffffffff),
+                       (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+       dev->data->all_multicast = 1;
+
+       rte_write32(rte_cpu_to_le_32(0x04400002),
+                       (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+       rte_write32(rte_cpu_to_le_32(0x10800049),
+                       (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+
+       return 0;
+}
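
ENETFEC_GAUR and ENETFEC_GALR together form a 64-bit group (multicast)
address hash table; the all-ones writes above accept every multicast frame.
For reference, a sketch of how one address would select its hash bit,
assuming the same scheme as the Linux fec driver for this IP block (the
helper below is hypothetical, not part of this patch):

    /* Reflected CRC-32 (poly 0xEDB88320) over the MAC address; the top
     * six bits pick one of the 64 bits in GAUR:GALR.
     */
    static uint32_t
    ether_crc32_le(const uint8_t *data, size_t len)
    {
            uint32_t crc = 0xFFFFFFFFu;
            size_t i;
            int bit;

            for (i = 0; i < len; i++) {
                    crc ^= data[i];
                    for (bit = 0; bit < 8; bit++)
                            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
            }
            return crc;
    }

    /* hash values 32-63 map to a GAUR bit, 0-31 to a GALR bit */
    uint32_t hash = (ether_crc32_le(mac, 6) >> 26) & 0x3f;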
+
+/* Set the MAC address in hardware. */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+                   struct rte_ether_addr *addr)
+{
+       struct enetfec_private *fep = dev->data->dev_private;
+
+       writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+               (addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+               (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
+       writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+               (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
+
+       rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+       return 0;
+}
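
As a worked example of the byte packing above: for MAC address
01:02:03:04:05:06 the two writes program

    ENETFEC_PALR = 0x01020304   /* bytes 0-3, byte 0 in bits 31:24 */
    ENETFEC_PAUR = 0x05060000   /* bytes 4-5 in bits 31:16 */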
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+             struct rte_eth_stats *stats)
+{
+       struct enetfec_private *fep = dev->data->dev_private;
+       struct rte_eth_stats *eth_stats = &fep->stats;
+
+       stats->ipackets = eth_stats->ipackets;
+       stats->ibytes = eth_stats->ibytes;
+       stats->ierrors = eth_stats->ierrors;
+       stats->opackets = eth_stats->opackets;
+       stats->obytes = eth_stats->obytes;
+       stats->oerrors = eth_stats->oerrors;
+       stats->rx_nombuf = eth_stats->rx_nombuf;
+
+       return 0;
+}
+
 static int
 enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
@@ -193,6 +324,18 @@ enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
        return 0;
 }
 
+static void
+enet_free_queue(struct rte_eth_dev *dev)
+{
+       struct enetfec_private *fep = dev->data->dev_private;
+       unsigned int i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++)
+               rte_free(fep->rx_queues[i]);
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               rte_free(fep->tx_queues[i]);
+}
+
 static const unsigned short offset_des_active_rxq[] = {
        ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
 };
@@ -396,6 +539,12 @@ static const struct eth_dev_ops enetfec_ops = {
        .dev_configure          = enetfec_eth_configure,
        .dev_start              = enetfec_eth_start,
        .dev_stop               = enetfec_eth_stop,
+       .dev_close              = enetfec_eth_close,
+       .link_update            = enetfec_eth_link_update,
+       .promiscuous_enable     = enetfec_promiscuous_enable,
+       .allmulticast_enable    = enetfec_multicast_enable,
+       .mac_addr_set           = enetfec_set_mac_address,
+       .stats_get              = enetfec_stats_get,
        .dev_infos_get          = enetfec_eth_info,
        .rx_queue_setup         = enetfec_rx_queue_setup,
        .tx_queue_setup         = enetfec_tx_queue_setup
@@ -421,6 +570,9 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
        int rc;
        int i;
        unsigned int bdsize;
+       struct rte_ether_addr macaddr = {
+               .addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
+       };
 
        name = rte_vdev_device_name(vdev);
        ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);
@@ -461,6 +613,21 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
                fep->bd_addr_p = fep->bd_addr_p + bdsize;
        }
 
+       /* Allocate memory for storing the MAC address */
+       dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
+       if (dev->data->mac_addrs == NULL) {
+               ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+                       RTE_ETHER_ADDR_LEN);
+               rc = -ENOMEM;
+               goto err;
+       }
+
+       /* Set the default MAC address */
+       enetfec_set_mac_address(dev, &macaddr);
+
+       fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
        rc = enetfec_eth_init(dev);
        if (rc)
                goto failed_init;
@@ -469,6 +636,8 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 
 failed_init:
        ENETFEC_PMD_ERR("Failed to init");
+err:
+       rte_eth_dev_release_port(dev);
        return rc;
 }
 
@@ -476,6 +645,8 @@ static int
 pmd_enetfec_remove(struct rte_vdev_device *vdev)
 {
        struct rte_eth_dev *eth_dev = NULL;
+       struct enetfec_private *fep;
+       struct enetfec_priv_rx_q *rxq;
        int ret;
 
        /* find the ethdev entry */
@@ -483,11 +654,22 @@ pmd_enetfec_remove(struct rte_vdev_device *vdev)
        if (eth_dev == NULL)
                return -ENODEV;
 
+       fep = eth_dev->data->dev_private;
+       /* Free descriptor base of first RX queue as it was configured
+        * first in enetfec_eth_init().
+        */
+       rxq = fep->rx_queues[0];
+       rte_free(rxq->bd.base);
+       enet_free_queue(eth_dev);
+       enetfec_eth_stop(eth_dev);
+
        ret = rte_eth_dev_release_port(eth_dev);
        if (ret != 0)
                return -EINVAL;
 
        ENETFEC_PMD_INFO("Release enetfec sw device");
+       enetfec_cleanup(fep);
+
        return 0;
 }
 
index 27e124c..06a6c10 100644 (file)
@@ -7,6 +7,10 @@
 
 #include <rte_ethdev.h>
 
+#define BD_LEN                 49152
+#define ENETFEC_TX_FR_SIZE     2048
+#define ETH_HLEN               RTE_ETHER_HDR_LEN
+
 /* full duplex */
 #define FULL_DUPLEX            0x00
 
 #define ENETFEC_MAX_RX_PKT_LEN 3000
 
 #define __iomem
+#if defined(RTE_ARCH_ARM)
+#if defined(RTE_ARCH_64)
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+
+#else /* RTE_ARCH_32 */
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
+
+#else
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
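
On ARMv8, dcbf() issues "dc cvac", cleaning the data cache line holding the
given address out to the point of coherency, so that buffers written by the
CPU are visible to the ENETFEC DMA engine. A minimal sketch of flushing a
whole buffer with it (the helper name is hypothetical, not part of this
patch):

    static inline void
    enet_flush_for_dma(const void *buf, size_t len)
    {
            size_t i;

            /* one clean per cache line covering the buffer */
            for (i = 0; i < len; i += RTE_CACHE_LINE_SIZE)
                    dcbf((const char *)buf + i);
    }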
+
 /*
  * ENETFEC can support one Rx and one Tx queue.
  */
@@ -71,6 +90,7 @@ struct enetfec_priv_rx_q {
 
 struct enetfec_private {
        struct rte_eth_dev      *dev;
+       struct rte_eth_stats    stats;
        int                     full_duplex;
        int                     flag_pause;
        uint32_t                quirks;
@@ -123,4 +143,9 @@ enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)
        return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
 }
 
+uint16_t enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts);
+uint16_t enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts);
+
 #endif /*__ENETFEC_ETHDEV_H__*/
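
The enet_get_bd_index() helper shown in the hunk above recovers a
descriptor's ring index from its address with a shift instead of a
division. A worked example, assuming 32-byte descriptors (d_size_log2 = 5;
the actual size depends on the descriptor layout):

    bdp = bd->base + 3 * 32          /* fourth descriptor in the ring */
    (bdp - bd->base) >> 5  ==  96 >> 5  ==  3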
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644 (file)
index 0000000..d7ec139
--- /dev/null
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
+/* This function performs the Rx queue processing: dequeue packets from the
+ * Rx queue and, while walking the ring, mark the processed descriptors as
+ * empty again.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts)
+{
+       struct rte_mempool *pool;
+       struct bufdesc *bdp;
+       struct rte_mbuf *mbuf, *new_mbuf = NULL;
+       unsigned short status;
+       unsigned short pkt_len;
+       int pkt_received = 0, index = 0;
+       void *data;
+       struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
+       struct rte_eth_stats *stats = &rxq->fep->stats;
+       pool = rxq->pool;
+       bdp = rxq->bd.cur;
+
+       /* Process the incoming packet */
+       status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+       while ((status & RX_BD_EMPTY) == 0) {
+               if (pkt_received >= nb_pkts)
+                       break;
+
+               new_mbuf = rte_pktmbuf_alloc(pool);
+               if (unlikely(new_mbuf == NULL)) {
+                       stats->rx_nombuf++;
+                       break;
+               }
+               /* Check for errors. */
+               status ^= RX_BD_LAST;
+               if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+                       RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+                       RX_BD_TR)) {
+                       stats->ierrors++;
+                       if (status & RX_BD_OV) {
+                               /* FIFO overrun */
+                               /* enet_dump_rx(rxq); */
+                               ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
+                               goto rx_processing_done;
+                       }
+                       if (status & (RX_BD_LG | RX_BD_SH
+                                               | RX_BD_LAST)) {
+                               /* Frame too long or too short. */
+                               ENETFEC_DP_LOG(DEBUG, "rx_length_error");
+                               if (status & RX_BD_LAST)
+                                       ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
+                       }
+                       /* CRC error */
+                       if (status & RX_BD_CR)
+                               ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
+                       /* Report late collisions as a frame error. */
+                       if (status & (RX_BD_NO | RX_BD_TR))
+                               ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
+                       goto rx_processing_done;
+               }
+
+               /* Process the incoming frame. */
+               stats->ipackets++;
+               pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+               stats->ibytes += pkt_len;
+
+               /* Get the filled mbuf; mtod() accounts for the data_off field. */
+               index = enet_get_bd_index(bdp, &rxq->bd);
+               mbuf = rxq->rx_mbuf[index];
+
+               data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+               rte_prefetch0(data);
+               /* Append the received data, excluding the 4-byte FCS */
+               rte_pktmbuf_append(mbuf, pkt_len - 4);
+
+               /* The receive accelerator (RACC) prepends 2 bytes of padding */
+               if (rxq->fep->quirks & QUIRK_RACC)
+                       data = rte_pktmbuf_adj(mbuf, 2);
+
+               rx_pkts[pkt_received] = mbuf;
+               pkt_received++;
+               rxq->rx_mbuf[index] = new_mbuf;
+               rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+                               &bdp->bd_bufaddr);
+rx_processing_done:
+               /* Clear the status flags for this buffer */
+               status &= ~RX_BD_STATS;
+
+               /* Mark the buffer empty */
+               status |= RX_BD_EMPTY;
+
+               if (rxq->fep->bufdesc_ex) {
+                       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+                       rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+                                   &ebdp->bd_esc);
+                       rte_write32(0, &ebdp->bd_prot);
+                       rte_write32(0, &ebdp->bd_bdu);
+               }
+
+               /* Make sure the updates to rest of the descriptor are
+                * performed before transferring ownership.
+                */
+               rte_wmb();
+               rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+               /* Update BD pointer to next entry */
+               bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+               /* Doing this here will keep the FEC running while we process
+                * incoming frames.
+                */
+               rte_write32(0, rxq->bd.active_reg_desc);
+               status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+       }
+       rxq->bd.cur = bdp;
+       return pkt_received;
+}
+
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct enetfec_priv_tx_q *txq  =
+                       (struct enetfec_priv_tx_q *)tx_queue;
+       struct rte_eth_stats *stats = &txq->fep->stats;
+       struct bufdesc *bdp, *last_bdp;
+       struct rte_mbuf *mbuf;
+       unsigned short status;
+       unsigned short buflen;
+       unsigned int index, estatus = 0;
+       unsigned int i, pkt_transmitted = 0;
+       uint8_t *data;
+
+       while (pkt_transmitted < nb_pkts) {
+               bdp = txq->bd.cur;
+               /* First clean the ring */
+               index = enet_get_bd_index(bdp, &txq->bd);
+               status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+               if (status & TX_BD_READY) {
+                       stats->oerrors++;
+                       break;
+               }
+               if (txq->tx_mbuf[index]) {
+                       rte_pktmbuf_free(txq->tx_mbuf[index]);
+                       txq->tx_mbuf[index] = NULL;
+               }
+
+               mbuf = *(tx_pkts);
+               tx_pkts++;
+
+               /* Fill in a Tx ring entry */
+               last_bdp = bdp;
+               status &= ~TX_BD_STATS;
+
+               /* Set buffer length and buffer pointer */
+               buflen = rte_pktmbuf_pkt_len(mbuf);
+
+               /* Scatter-gather mbufs cannot be sent; stop and report
+                * the number of packets transmitted so far.
+                */
+               if (mbuf->nb_segs > 1) {
+                       ENETFEC_DP_LOG(DEBUG, "SG not supported");
+                       return pkt_transmitted;
+               }
+               stats->opackets++;
+               stats->obytes += buflen;
+               status |= (TX_BD_LAST);
+               data = rte_pktmbuf_mtod(mbuf, void *);
+               /* Flush the frame data so the DMA engine reads fresh memory */
+               for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+                       dcbf(data + i);
+
+               rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+                           &bdp->bd_bufaddr);
+               rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+               if (txq->fep->bufdesc_ex) {
+                       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+                       rte_write32(0, &ebdp->bd_bdu);
+                       rte_write32(rte_cpu_to_le_32(estatus),
+                                   &ebdp->bd_esc);
+               }
+
+               index = enet_get_bd_index(last_bdp, &txq->bd);
+               /* Save mbuf pointer */
+               txq->tx_mbuf[index] = mbuf;
+
+               /* Make sure the updates to rest of the descriptor are performed
+                * before transferring ownership.
+                */
+               status |= (TX_BD_READY | TX_BD_TC);
+               rte_wmb();
+               rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+               /* Trigger transmission start */
+               rte_write32(0, txq->bd.active_reg_desc);
+               pkt_transmitted++;
+
+               /* If this was the last BD in the ring, start at the
+                * beginning again.
+                */
+               bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+               /* Make sure the updates to bdp and tx_mbuf are performed
+                * before updating txq->bd.cur.
+                */
+               txq->bd.cur = bdp;
+       }
+       return pkt_transmitted;
+}
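
Both handlers follow the same descriptor ownership handshake with the DMA
engine; schematically (a summary of the code above, not additional driver
logic):

    /*
     * Rx:  while !(bd_sc & RX_BD_EMPTY):         CPU owns the descriptor
     *          harvest the mbuf, refill bd_bufaddr
     *          rte_wmb()                         order writes vs. ownership
     *          bd_sc |= RX_BD_EMPTY              hand back to hardware
     *          write 0 to active_reg_desc        kick RDAR
     *
     * Tx:  require !(bd_sc & TX_BD_READY):       CPU owns the descriptor
     *          fill bd_bufaddr/bd_datlen, dcbf() the frame
     *          rte_wmb()
     *          bd_sc |= TX_BD_READY | TX_BD_TC   hand over to hardware
     *          write 0 to active_reg_desc        kick TDAR
     */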
index b218d13..29a4644 100644 (file)
@@ -9,4 +9,5 @@ endif
 sources = files(
         'enet_ethdev.c',
         'enet_uio.c',
+        'enet_rxtx.c',
 )