F: doc/guides/nics/szedata2.rst
F: doc/guides/nics/features/szedata2.ini
+Netcope nfb
+M: Rastislav Cernay <cernay@netcope.com>
+M: Jan Remes <remes@netcope.com>
+F: drivers/net/nfb/
+F: doc/guides/nics/nfb.rst
+F: doc/guides/nics/features/nfb.ini
+
Netronome nfp
M: Alejandro Lucero <alejandro.lucero@netronome.com>
F: drivers/net/nfp/
#
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
+#
+# Compile software PMD backed by NFB device
+#
+CONFIG_RTE_LIBRTE_NFB_PMD=n
+
#
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
#
# - DPDK_DEP_SSL (y/[n])
# - DPDK_DEP_IPSEC_MB (y/[n])
# - DPDK_DEP_SZE (y/[n])
+# - DPDK_DEP_NFB (y/[n])
# - DPDK_DEP_ZLIB (y/[n])
# - DPDK_MAKE_JOBS (int)
# - DPDK_NOTIFY (notify-send)
--- /dev/null
+;
+; Supported features of the 'nfb' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = P
+Link status = Y
+Queue start/stop = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Basic stats = Y
+Extended stats = Y
+Stats per queue = Y
+Other kdrv = Y
+x86-64 = Y
+Usage doc = Y
mvneta
mvpp2
netvsc
+ nfb
nfp
octeontx
qede
--- /dev/null
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2019 Cesnet
+ Copyright 2019 Netcope Technologies
+
+NFB poll mode driver library
+============================
+
+The NFB poll mode driver library implements support for the Netcope
+FPGA Boards (**NFB-***), FPGA-based programmable NICs.
+The NFB PMD uses the interface provided by the libnfb library to communicate
+with the NFB cards over the nfb layer.
+
+More information about the
+`NFB cards <http://www.netcope.com/en/products/fpga-boards>`_
+and used technology
+(`Netcope Development Kit <http://www.netcope.com/en/products/fpga-development-kit>`_)
+can be found on the `Netcope Technologies website <http://www.netcope.com/>`_.
+
+.. note::
+
+ This driver has external dependencies.
+ Therefore it is disabled in default configuration files.
+ It can be enabled by setting ``CONFIG_RTE_LIBRTE_NFB_PMD=y``
+ and recompiling.
+
+.. note::
+
+ Currently the driver is supported only on x86_64 architectures.
+ Only x86_64 versions of the external libraries are provided.
+
+Prerequisites
+-------------
+
+This PMD requires kernel modules which are responsible for initialization and
+allocation of resources needed by the nfb layer.
+Communication between the PMD and the kernel modules is mediated by the libnfb
+library.
+These kernel modules and the library are not part of DPDK and must be installed
+separately:
+
+* **libnfb library**
+
+  The library provides an API for initializing nfb transfers and for receiving
+  and transmitting data segments.
+
+* **Kernel modules**
+
+ * nfb
+
+    The kernel modules manage hardware initialization and the allocation and
+    sharing of resources for user space applications.
+
+Dependencies can be found here:
+`Netcope common <https://www.netcope.com/en/company/community-support/dpdk-libsze2#NFB>`_.
+
+Versions of the packages
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The minimum version of the provided packages:
+
+* for DPDK from 19.05
+
+Configuration
+-------------
+
+These configuration options can be modified before compilation in the
+``.config`` file:
+
+* ``CONFIG_RTE_LIBRTE_NFB_PMD`` default value: **n**
+
+ Value **y** enables compilation of nfb PMD.
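+
+For example, with the make-based build system the option can be switched on in
+the generated ``.config`` file before building (the target name below is only
+an example):
+
+.. code-block:: console
+
+   make config T=x86_64-native-linuxapp-gcc
+   sed -i 's/CONFIG_RTE_LIBRTE_NFB_PMD=n/CONFIG_RTE_LIBRTE_NFB_PMD=y/' build/.config
+   make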
+
+Using the NFB PMD
+-----------------
+
+Kernel modules have to be loaded before running the DPDK application.
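+
+A minimal example, assuming the ``nfb`` kernel module from the Netcope common
+package is already installed on the system:
+
+.. code-block:: console
+
+   sudo modprobe nfb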
+
+NFB card architecture
+---------------------
+
+The NFB cards are multi-port multi-queue cards, where (generally) data from any
+Ethernet port may be sent to any queue.
+They are represented in DPDK as a single port.
+
+The NFB-200G2QL card employs an addon cable which allows it to be connected to
+two physical PCI-E slots at the same time (see the diagram below).
+This is done to allow 200 Gbps of traffic to be transferred through the PCI-E
+bus (a single PCI-E 3.0 x16 slot provides only about 125 Gbps of theoretical
+throughput).
+
+Although each slot may be connected to a different CPU and therefore to a
+different NUMA node, the card is represented as a single port in DPDK. To work
+with data from the individual queues on the right NUMA node, check which NUMA
+node the first and the last queue are connected to (each NUMA node handles
+half of the queues).
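+
+One rough way to check the NUMA placement of the two endpoints is to read the
+NUMA node of each PCI function of the card from sysfs (the PCI addresses below
+are examples only):
+
+.. code-block:: console
+
+   cat /sys/bus/pci/devices/0000:06:00.0/numa_node
+   cat /sys/bus/pci/devices/0000:08:00.0/numa_node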
+
+.. figure:: img/szedata2_nfb200g_architecture.*
+ :align: center
+
+ NFB-200G2QL high-level diagram
+
+Limitations
+-----------
+
+The driver is usable only on Linux, namely on CentOS.
+
+Since a card is always represented as a single port, but can be connected to
+two NUMA nodes, it must be checked manually which NUMA node the master/slave
+endpoint is connected to.
+
+Example of usage
+----------------
+
+Read packets from receive queues 0 and 1 and write them to transmit queues
+0 and 1:
+
+.. code-block:: console
+
+ $RTE_TARGET/app/testpmd -l 0-3 -n 2 \
+ -- --port-topology=chained --rxq=2 --txq=2 --nb-cores=2 -i -a
+
+Example output:
+
+.. code-block:: console
+
+ [...]
+ EAL: PCI device 0000:06:00.0 on NUMA socket -1
+ EAL: probe driver: 1b26:c1c1 net_nfb
+ PMD: Initializing NFB device (0000:06:00.0)
+   PMD: Available NDP queues RX: 8 TX: 8
+ PMD: NFB device (0000:06:00.0) successfully initialized
+ Interactive-mode selected
+ Auto-start selected
+ Configuring Port 0 (socket 0)
+ Port 0: 00:11:17:00:00:00
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ Start automatic packet forwarding
+ io packet forwarding - CRC stripping disabled - packets/burst=32
+ nb forwarding cores=2 - nb forwarding ports=1
+ RX queues=2 - RX desc=128 - RX free threshold=0
+ RX threshold registers: pthresh=0 hthresh=0 wthresh=0
+ TX queues=2 - TX desc=512 - TX free threshold=0
+ TX threshold registers: pthresh=0 hthresh=0 wthresh=0
+ TX RS bit threshold=0 - TXQ flags=0x0
+ testpmd>
and receive raw packets through the socket which would bypass the kernel
network stack to achieve high performance packet processing.
+* **Added NFB net PMD.**
+
+ Added the new ``nfb`` net driver for Netcope NFB cards. See
+ the :doc:`../nics/nfb` NIC guide for more details on this new driver.
+
* **Updated Solarflare network PMD.**
Updated the sfc_efx driver including the following changes:
DIRS-$(CONFIG_RTE_LIBRTE_MVNETA_PMD) += mvneta
DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2
DIRS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += netvsc
+DIRS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
'mvneta',
'mvpp2',
'netvsc',
+ 'nfb',
'nfp',
'null', 'octeontx', 'pcap', 'qede', 'ring',
'sfc',
--- /dev/null
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Cesnet
+# Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_nfb.a
+
+INCLUDES :=-I$(SRCDIR)
+
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --cflags netcope-common)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_ethdev -lrte_net
+LDLIBS += -lrte_bus_pci
+LDLIBS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --libs netcope-common)
+
+EXPORT_MAP := rte_pmd_nfb_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_rxmode.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
--- /dev/null
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Cesnet
+# Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+# All rights reserved.
+
+dep = cc.find_library('nfb', required: false)
+
+build = dep.found() and cc.has_header('nfb/nfb.h', dependencies: dep)
+
+nc = dependency('netcope-common', required: false)
+
+ext_deps += dep
+ext_deps += nc
+
+sources = files('nfb_rx.c', 'nfb_tx.c', 'nfb_stats.c', 'nfb_ethdev.c', 'nfb_rxmode.c')
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#ifndef _NFB_H_
+#define _NFB_H_
+
+#include <nfb/nfb.h>
+#include <nfb/ndp.h>
+#include <netcope/rxmac.h>
+#include <netcope/txmac.h>
+
+#include "nfb_rx.h"
+#include "nfb_tx.h"
+
+/* PCI Vendor ID */
+#define PCI_VENDOR_ID_NETCOPE 0x1b26
+
+/* PCI Device IDs */
+#define PCI_DEVICE_ID_NFB_40G2 0xcb80
+#define PCI_DEVICE_ID_NFB_100G2 0xc2c1
+#define PCI_DEVICE_ID_NFB_200G2QL 0xc250
+
+/* Max index of ndp rx/tx queues */
+#define RTE_ETH_NDP_MAX_RX_QUEUES 32
+#define RTE_ETH_NDP_MAX_TX_QUEUES 32
+
+/* Max count of RX/TX MAC components */
+#define RTE_MAX_NC_RXMAC 256
+#define RTE_MAX_NC_TXMAC 256
+
+#define RTE_NFB_DRIVER_NAME net_nfb
+
+struct pmd_internals {
+ uint16_t max_rxmac;
+ uint16_t max_txmac;
+ struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC];
+ struct nc_txmac *txmac[RTE_MAX_NC_TXMAC];
+
+ char nfb_dev[PATH_MAX];
+ struct nfb_device *nfb;
+ /* Place to remember if filter was promiscuous or filtering by table,
+ * when disabling allmulticast
+ */
+ enum nc_rxmac_mac_filter rx_filter_original;
+};
+
+#endif /* _NFB_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#include <nfb/nfb.h>
+#include <nfb/ndp.h>
+#include <netcope/rxmac.h>
+#include <netcope/txmac.h>
+
+#include <rte_ethdev_pci.h>
+
+#include "nfb_stats.h"
+#include "nfb_rx.h"
+#include "nfb_tx.h"
+#include "nfb_rxmode.h"
+#include "nfb.h"
+
+/**
+ * Default MAC addr
+ */
+static const struct ether_addr eth_addr = {
+ .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
+};
+
+/**
+ * Open all RX MAC components
+ *
+ * @param nfb
+ *   Pointer to nfb device.
+ * @param[out] rxmac
+ *   Pointer to output array of nc_rxmac
+ * @param[out] max_rxmac
+ *   Pointer to output count of opened rxmacs
+ */
+static void
+nfb_nc_rxmac_init(struct nfb_device *nfb,
+ struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
+ uint16_t *max_rxmac)
+{
+ *max_rxmac = 0;
+ while ((rxmac[*max_rxmac] = nc_rxmac_open_index(nfb, *max_rxmac)))
+ ++(*max_rxmac);
+}
+
+/**
+ * Open all TX MAC components
+ *
+ * @param nfb
+ *   Pointer to nfb device.
+ * @param[out] txmac
+ *   Pointer to output array of nc_txmac
+ * @param[out] max_txmac
+ *   Pointer to output count of opened txmacs
+ */
+static void
+nfb_nc_txmac_init(struct nfb_device *nfb,
+ struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
+ uint16_t *max_txmac)
+{
+ *max_txmac = 0;
+ while ((txmac[*max_txmac] = nc_txmac_open_index(nfb, *max_txmac)))
+ ++(*max_txmac);
+}
+
+/**
+ * Close all RX MAC components
+ *
+ * @param rxmac
+ *   Pointer to array of nc_rxmac
+ * @param max_rxmac
+ *   Count of opened rxmacs
+ */
+static void
+nfb_nc_rxmac_deinit(struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
+ uint16_t max_rxmac)
+{
+ for (; max_rxmac > 0; --max_rxmac) {
+ nc_rxmac_close(rxmac[max_rxmac - 1]);
+ rxmac[max_rxmac - 1] = NULL;
+ }
+}
+
+/**
+ * Close all TX MAC components
+ *
+ * @param txmac
+ *   Pointer to array of nc_txmac
+ * @param max_txmac
+ *   Count of opened txmacs
+ */
+static void
+nfb_nc_txmac_deinit(struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
+ uint16_t max_txmac)
+{
+ for (; max_txmac > 0; --max_txmac) {
+ nc_txmac_close(txmac[max_txmac - 1]);
+ txmac[max_txmac - 1] = NULL;
+ }
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * Start device by starting all configured queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_dev_start(struct rte_eth_dev *dev)
+{
+ int ret;
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+
+ for (i = 0; i < nb_rx; i++) {
+ ret = nfb_eth_rx_queue_start(dev, i);
+ if (ret != 0)
+ goto err_rx;
+ }
+
+ for (i = 0; i < nb_tx; i++) {
+ ret = nfb_eth_tx_queue_start(dev, i);
+ if (ret != 0)
+ goto err_tx;
+ }
+
+ return 0;
+
+err_tx:
+ for (i = 0; i < nb_tx; i++)
+ nfb_eth_tx_queue_stop(dev, i);
+err_rx:
+ for (i = 0; i < nb_rx; i++)
+ nfb_eth_rx_queue_stop(dev, i);
+ return ret;
+}
+
+/**
+ * DPDK callback to stop the device.
+ *
+ * Stop device by stopping all configured queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+nfb_eth_dev_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+
+ for (i = 0; i < nb_tx; i++)
+ nfb_eth_tx_queue_stop(dev, i);
+
+ for (i = 0; i < nb_rx; i++)
+ nfb_eth_rx_queue_stop(dev, i);
+}
+
+/**
+ * DPDK callback for Ethernet device configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] dev_info
+ * Info structure output buffer.
+ */
+static void
+nfb_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)-1;
+ dev_info->max_rx_queues = dev->data->nb_rx_queues;
+ dev_info->max_tx_queues = dev->data->nb_tx_queues;
+ dev_info->speed_capa = ETH_LINK_SPEED_100G;
+}
+
+/**
+ * DPDK callback to close the device.
+ *
+ * Destroy all queues and objects, free memory.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+nfb_eth_dev_close(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+
+ nfb_eth_dev_stop(dev);
+
+ for (i = 0; i < nb_rx; i++) {
+ nfb_eth_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+ for (i = 0; i < nb_tx; i++) {
+ nfb_eth_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] link
+ * Storage for current link status.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ uint16_t i;
+ struct nc_rxmac_status status;
+ struct rte_eth_link link;
+ memset(&link, 0, sizeof(link));
+
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ status.speed = MAC_SPEED_UNKNOWN;
+
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_status = ETH_LINK_DOWN;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = ETH_LINK_SPEED_FIXED;
+
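+ /*
+ * Take the reported speed from the first RX MAC; the loop below marks
+ * the link as up if any enabled MAC reports link up.
+ */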
+ if (internals->rxmac[0] != NULL) {
+ nc_rxmac_read_status(internals->rxmac[0], &status);
+
+ switch (status.speed) {
+ case MAC_SPEED_10G:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case MAC_SPEED_40G:
+ link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ case MAC_SPEED_100G:
+ link.link_speed = ETH_SPEED_NUM_100G;
+ break;
+ default:
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ break;
+ }
+ }
+
+ for (i = 0; i < internals->max_rxmac; ++i) {
+ nc_rxmac_read_status(internals->rxmac[i], &status);
+
+ if (status.enabled && status.link_up) {
+ link.link_status = ETH_LINK_UP;
+ break;
+ }
+ }
+
+ rte_eth_linkstatus_set(dev, &link);
+
+ return 0;
+}
+
+/**
+ * DPDK callback to bring the link UP.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ uint16_t i;
+ for (i = 0; i < internals->max_rxmac; ++i)
+ nc_rxmac_enable(internals->rxmac[i]);
+
+ for (i = 0; i < internals->max_txmac; ++i)
+ nc_txmac_enable(internals->txmac[i]);
+
+ return 0;
+}
+
+/**
+ * DPDK callback to bring the link DOWN.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ uint16_t i;
+ for (i = 0; i < internals->max_rxmac; ++i)
+ nc_rxmac_disable(internals->rxmac[i]);
+
+ for (i = 0; i < internals->max_txmac; ++i)
+ nc_txmac_disable(internals->txmac[i]);
+
+ return 0;
+}
+
+/**
+ * DPDK callback to set primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ unsigned int i;
+ uint64_t mac = 0;
+ struct rte_eth_dev_data *data = dev->data;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ data->dev_private;
+
+ if (!is_valid_assigned_ether_addr(mac_addr))
+ return -EINVAL;
+
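+ /*
+ * Pack the six MAC address bytes into a 64-bit value, first byte in
+ * the most significant position.
+ */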
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ mac <<= 8;
+ mac |= mac_addr->addr_bytes[i] & 0xFF;
+ }
+
+ for (i = 0; i < internals->max_rxmac; ++i)
+ nc_rxmac_set_mac(internals->rxmac[i], 0, mac, 1);
+
+ ether_addr_copy(mac_addr, data->mac_addrs);
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = nfb_eth_dev_start,
+ .dev_stop = nfb_eth_dev_stop,
+ .dev_set_link_up = nfb_eth_dev_set_link_up,
+ .dev_set_link_down = nfb_eth_dev_set_link_down,
+ .dev_close = nfb_eth_dev_close,
+ .dev_configure = nfb_eth_dev_configure,
+ .dev_infos_get = nfb_eth_dev_info,
+ .promiscuous_enable = nfb_eth_promiscuous_enable,
+ .promiscuous_disable = nfb_eth_promiscuous_disable,
+ .allmulticast_enable = nfb_eth_allmulticast_enable,
+ .allmulticast_disable = nfb_eth_allmulticast_disable,
+ .rx_queue_start = nfb_eth_rx_queue_start,
+ .rx_queue_stop = nfb_eth_rx_queue_stop,
+ .tx_queue_start = nfb_eth_tx_queue_start,
+ .tx_queue_stop = nfb_eth_tx_queue_stop,
+ .rx_queue_setup = nfb_eth_rx_queue_setup,
+ .tx_queue_setup = nfb_eth_tx_queue_setup,
+ .rx_queue_release = nfb_eth_rx_queue_release,
+ .tx_queue_release = nfb_eth_tx_queue_release,
+ .link_update = nfb_eth_link_update,
+ .stats_get = nfb_eth_stats_get,
+ .stats_reset = nfb_eth_stats_reset,
+ .mac_addr_set = nfb_eth_mac_addr_set,
+};
+
+/**
+ * DPDK callback to initialize an ethernet device
+ *
+ * @param dev
+ * Pointer to ethernet device structure
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_dev_init(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_pci_addr *pci_addr = &pci_dev->addr;
+ struct ether_addr eth_addr_init;
+
+ RTE_LOG(INFO, PMD, "Initializing NFB device (" PCI_PRI_FMT ")\n",
+ pci_addr->domain, pci_addr->bus, pci_addr->devid,
+ pci_addr->function);
+
+ snprintf(internals->nfb_dev, PATH_MAX,
+ "/dev/nfb/by-pci-slot/" PCI_PRI_FMT,
+ pci_addr->domain, pci_addr->bus, pci_addr->devid,
+ pci_addr->function);
+
+ /*
+ * Get number of available DMA RX and TX queues, which is maximum
+ * number of queues that can be created and store it in private device
+ * data structure.
+ */
+ internals->nfb = nfb_open(internals->nfb_dev);
+ if (internals->nfb == NULL) {
+ RTE_LOG(ERR, PMD, "nfb_open(): failed to open %s",
+ internals->nfb_dev);
+ return -EINVAL;
+ }
+ data->nb_rx_queues = ndp_get_rx_queue_available_count(internals->nfb);
+ data->nb_tx_queues = ndp_get_tx_queue_available_count(internals->nfb);
+
+ RTE_LOG(INFO, PMD, "Available NDP queues RX: %u TX: %u\n",
+ data->nb_rx_queues, data->nb_tx_queues);
+
+ nfb_nc_rxmac_init(internals->nfb,
+ internals->rxmac,
+ &internals->max_rxmac);
+ nfb_nc_txmac_init(internals->nfb,
+ internals->txmac,
+ &internals->max_txmac);
+
+ /* Set rx, tx burst functions */
+ dev->rx_pkt_burst = nfb_eth_ndp_rx;
+ dev->tx_pkt_burst = nfb_eth_ndp_tx;
+
+ /* Set function callbacks for Ethernet API */
+ dev->dev_ops = &ops;
+
+ /* Get link state */
+ nfb_eth_link_update(dev, 0);
+
+ /* Allocate space for one mac address */
+ data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
+ RTE_CACHE_LINE_SIZE);
+ if (data->mac_addrs == NULL) {
+ RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
+ nfb_close(internals->nfb);
+ return -EINVAL;
+ }
+
+ eth_random_addr(eth_addr_init.addr_bytes);
+ eth_addr_init.addr_bytes[0] = eth_addr.addr_bytes[0];
+ eth_addr_init.addr_bytes[1] = eth_addr.addr_bytes[1];
+ eth_addr_init.addr_bytes[2] = eth_addr.addr_bytes[2];
+
+ nfb_eth_mac_addr_set(dev, &eth_addr_init);
+
+ data->promiscuous = nfb_eth_promiscuous_get(dev);
+ data->all_multicast = nfb_eth_allmulticast_get(dev);
+ internals->rx_filter_original = data->promiscuous;
+
+ RTE_LOG(INFO, PMD, "NFB device ("
+ PCI_PRI_FMT ") successfully initialized\n",
+ pci_addr->domain, pci_addr->bus, pci_addr->devid,
+ pci_addr->function);
+
+ return 0;
+}
+
+/**
+ * DPDK callback to uninitialize an ethernet device
+ *
+ * @param dev
+ * Pointer to ethernet device structure
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ data->dev_private;
+
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_pci_addr *pci_addr = &pci_dev->addr;
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ nfb_nc_rxmac_deinit(internals->rxmac, internals->max_rxmac);
+ nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);
+ nfb_close(internals->nfb);
+
+ RTE_LOG(INFO, PMD, "NFB device ("
+ PCI_PRI_FMT ") successfully uninitialized\n",
+ pci_addr->domain, pci_addr->bus, pci_addr->devid,
+ pci_addr->function);
+
+ return 0;
+}
+
+static const struct rte_pci_id nfb_pci_id_table[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_40G2) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_100G2) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_200G2QL) },
+ { .vendor_id = 0, }
+};
+
+/**
+ * DPDK callback to register a PCI device.
+ *
+ * This function spawns Ethernet devices out of a given PCI device.
+ *
+ * @param[in] pci_drv
+ * PCI driver structure (nfb_driver).
+ * @param[in] pci_dev
+ * PCI device information.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+nfb_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct pmd_internals), nfb_eth_dev_init);
+}
+
+/**
+ * DPDK callback to remove a PCI device.
+ *
+ * This function removes all Ethernet devices belong to a given PCI device.
+ *
+ * @param[in] pci_dev
+ * Pointer to the PCI device.
+ *
+ * @return
+ * 0 on success, the function cannot fail.
+ */
+static int
+nfb_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, nfb_eth_dev_uninit);
+}
+
+static struct rte_pci_driver nfb_eth_driver = {
+ .id_table = nfb_pci_id_table,
+ .probe = nfb_eth_pci_probe,
+ .remove = nfb_eth_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(RTE_NFB_DRIVER_NAME, nfb_eth_driver);
+RTE_PMD_REGISTER_PCI_TABLE(RTE_NFB_DRIVER_NAME, nfb_pci_id_table);
+RTE_PMD_REGISTER_KMOD_DEP(RTE_NFB_DRIVER_NAME, "* nfb");
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#include "nfb_rx.h"
+#include "nfb.h"
+
+int
+nfb_eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
+{
+ struct ndp_rx_queue *rxq = dev->data->rx_queues[rxq_id];
+ int ret;
+
+ if (rxq->queue == NULL) {
+ RTE_LOG(ERR, PMD, "RX NDP queue is NULL!\n");
+ return -EINVAL;
+ }
+
+ ret = ndp_queue_start(rxq->queue);
+ if (ret != 0)
+ goto err;
+ dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+int
+nfb_eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id)
+{
+ struct ndp_rx_queue *rxq = dev->data->rx_queues[rxq_id];
+ int ret;
+
+ if (rxq->queue == NULL) {
+ RTE_LOG(ERR, PMD, "RX NDP queue is NULL!\n");
+ return -EINVAL;
+ }
+
+ ret = ndp_queue_stop(rxq->queue);
+ if (ret != 0)
+ return -EINVAL;
+
+ dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+int
+nfb_eth_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ struct ndp_rx_queue *rxq;
+ int ret;
+
+ rxq = rte_zmalloc_socket("ndp rx queue",
+ sizeof(struct ndp_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (rxq == NULL) {
+ RTE_LOG(ERR, PMD, "rte_zmalloc_socket() failed for rx queue id "
+ "%" PRIu16 "!\n", rx_queue_id);
+ return -ENOMEM;
+ }
+
+ ret = nfb_eth_rx_queue_init(internals->nfb,
+ rx_queue_id,
+ dev->data->port_id,
+ mb_pool,
+ rxq);
+
+ if (ret == 0)
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ else
+ rte_free(rxq);
+
+ return ret;
+}
+
+int
+nfb_eth_rx_queue_init(struct nfb_device *nfb,
+ uint16_t rx_queue_id,
+ uint16_t port_id,
+ struct rte_mempool *mb_pool,
+ struct ndp_rx_queue *rxq)
+{
+ const struct rte_pktmbuf_pool_private *mbp_priv =
+ rte_mempool_get_priv(mb_pool);
+
+ if (nfb == NULL)
+ return -EINVAL;
+
+ rxq->queue = ndp_open_rx_queue(nfb, rx_queue_id);
+ if (rxq->queue == NULL)
+ return -EINVAL;
+
+ rxq->nfb = nfb;
+ rxq->rx_queue_id = rx_queue_id;
+ rxq->in_port = port_id;
+ rxq->mb_pool = mb_pool;
+ rxq->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+
+ rxq->rx_pkts = 0;
+ rxq->rx_bytes = 0;
+ rxq->err_pkts = 0;
+
+ return 0;
+}
+
+void
+nfb_eth_rx_queue_release(void *q)
+{
+ struct ndp_rx_queue *rxq = (struct ndp_rx_queue *)q;
+ if (rxq->queue != NULL) {
+ ndp_close_rx_queue(rxq->queue);
+ rxq->queue = NULL;
+ rte_free(rxq);
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#ifndef _NFB_RX_H_
+#define _NFB_RX_H_
+
+#include <nfb/nfb.h>
+#include <nfb/ndp.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+
+struct ndp_rx_queue {
+ struct nfb_device *nfb; /* nfb dev structure */
+ struct ndp_queue *queue; /* rx queue */
+ uint16_t rx_queue_id; /* index */
+ uint8_t in_port; /* port */
+
+ struct rte_mempool *mb_pool; /* memory pool to allocate packets */
+ uint16_t buf_size; /* mbuf size */
+
+ volatile uint64_t rx_pkts; /* packets read */
+ volatile uint64_t rx_bytes; /* bytes read */
+ volatile uint64_t err_pkts; /* erroneous packets */
+};
+
+/**
+ * Initialize ndp_rx_queue structure
+ *
+ * @param nfb
+ * Pointer to nfb device structure.
+ * @param rx_queue_id
+ * RX queue index.
+ * @param port_id
+ * Device [external] port identifier.
+ * @param mb_pool
+ * Memory pool for buffer allocations.
+ * @param[out] rxq
+ * Pointer to ndp_rx_queue output structure
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+nfb_eth_rx_queue_init(struct nfb_device *nfb,
+ uint16_t rx_queue_id,
+ uint16_t port_id,
+ struct rte_mempool *mb_pool,
+ struct ndp_rx_queue *rxq);
+
+/**
+ * DPDK callback to setup a RX queue for use.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mb_pool
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+nfb_eth_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool);
+
+/**
+ * DPDK callback to release a RX queue.
+ *
+ * @param dpdk_rxq
+ * Generic RX queue pointer.
+ */
+void
+nfb_eth_rx_queue_release(void *q);
+
+/**
+ * Start traffic on Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rxq_id
+ * RX queue index.
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+nfb_eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id);
+
+/**
+ * Stop traffic on Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rxq_id
+ *   RX queue index.
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+int
+nfb_eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id);
+
+/**
+ * DPDK callback for RX.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] bufs
+ * Array to store received packets.
+ * @param nb_pkts
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= nb_pkts).
+ */
+static __rte_always_inline uint16_t
+nfb_eth_ndp_rx(void *queue,
+ struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct ndp_rx_queue *ndp = queue;
+ uint16_t packet_size;
+ uint64_t num_bytes = 0;
+ uint16_t num_rx;
+ unsigned int i;
+
+ const uint16_t buf_size = ndp->buf_size;
+
+ struct rte_mbuf *mbuf;
+ struct ndp_packet packets[nb_pkts];
+
+ struct rte_mbuf *mbufs[nb_pkts];
+
+ if (unlikely(ndp->queue == NULL || nb_pkts == 0)) {
+ RTE_LOG(ERR, PMD, "RX invalid arguments!\n");
+ return 0;
+ }
+
+ /* returns either all or nothing */
+ i = rte_pktmbuf_alloc_bulk(ndp->mb_pool, mbufs, nb_pkts);
+ if (unlikely(i != 0))
+ return 0;
+
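+ /* fetch up to nb_pkts received packet descriptors from the NDP queue */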
+ num_rx = ndp_rx_burst_get(ndp->queue, packets, nb_pkts);
+
+ if (unlikely(num_rx != nb_pkts)) {
+ for (i = num_rx; i < nb_pkts; i++)
+ rte_pktmbuf_free(mbufs[i]);
+ }
+
+ nb_pkts = num_rx;
+
+ num_rx = 0;
+ /*
+ * Reads the given number of packets from NDP queue given
+ * by queue and copies the packet data into a newly allocated mbuf
+ * to return.
+ */
+ for (i = 0; i < nb_pkts; ++i) {
+ mbuf = mbufs[i];
+
+ /* get the length of the received NDP packet */
+ packet_size = packets[i].data_length;
+
+ if (likely(packet_size <= buf_size)) {
+ /* NDP packet will fit in one mbuf, go ahead and copy */
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+ packets[i].data, packet_size);
+
+ mbuf->data_len = (uint16_t)packet_size;
+
+ mbuf->pkt_len = packet_size;
+ mbuf->port = ndp->in_port;
+ bufs[num_rx++] = mbuf;
+ num_bytes += packet_size;
+ } else {
+ /*
+ * NDP packet will not fit in one mbuf,
+ * scattered mode is not enabled, drop packet
+ */
+ rte_pktmbuf_free(mbuf);
+ }
+ }
+
+ ndp_rx_burst_put(ndp->queue);
+
+ ndp->rx_pkts += num_rx;
+ ndp->rx_bytes += num_bytes;
+ return num_rx;
+}
+
+#endif /* _NFB_RX_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#include "nfb_rxmode.h"
+#include "nfb.h"
+
+void
+nfb_eth_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+ uint16_t i;
+
+ internals->rx_filter_original = RXMAC_MAC_FILTER_PROMISCUOUS;
+
+ for (i = 0; i < internals->max_rxmac; ++i) {
+ nc_rxmac_mac_filter_enable(internals->rxmac[i],
+ RXMAC_MAC_FILTER_PROMISCUOUS);
+ }
+}
+
+void
+nfb_eth_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+ uint16_t i;
+
+ internals->rx_filter_original = RXMAC_MAC_FILTER_TABLE;
+
+ /* if promisc is not enabled, do nothing */
+ if (!nfb_eth_promiscuous_get(dev))
+ return;
+
+ for (i = 0; i < internals->max_rxmac; ++i) {
+ nc_rxmac_mac_filter_enable(internals->rxmac[i],
+ RXMAC_MAC_FILTER_TABLE);
+ }
+}
+
+int
+nfb_eth_promiscuous_get(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ struct nc_rxmac_status status;
+ status.mac_filter = RXMAC_MAC_FILTER_PROMISCUOUS;
+
+ nc_rxmac_read_status(internals->rxmac[0], &status);
+
+ return (status.mac_filter == RXMAC_MAC_FILTER_PROMISCUOUS);
+}
+
+void
+nfb_eth_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ uint16_t i;
+ for (i = 0; i < internals->max_rxmac; ++i) {
+ nc_rxmac_mac_filter_enable(internals->rxmac[i],
+ RXMAC_MAC_FILTER_TABLE_BCAST_MCAST);
+ }
+}
+
+void
+nfb_eth_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ uint16_t i;
+
+ /* if multicast is not enabled do nothing */
+ if (!nfb_eth_allmulticast_get(dev))
+ return;
+
+ for (i = 0; i < internals->max_rxmac; ++i) {
+ nc_rxmac_mac_filter_enable(internals->rxmac[i],
+ internals->rx_filter_original);
+ }
+}
+
+int
+nfb_eth_allmulticast_get(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ struct nc_rxmac_status status;
+ status.mac_filter = RXMAC_MAC_FILTER_PROMISCUOUS;
+ nc_rxmac_read_status(internals->rxmac[0], &status);
+
+ return (status.mac_filter == RXMAC_MAC_FILTER_TABLE_BCAST_MCAST);
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#ifndef _NFB_RXMODE_H_
+#define _NFB_RXMODE_H_
+
+#include <nfb/nfb.h>
+#include <nfb/ndp.h>
+
+#include <rte_ethdev.h>
+
+/**
+ * Getter for promiscuous mode
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @return 1 if enabled 0 otherwise
+ */
+int
+nfb_eth_promiscuous_get(struct rte_eth_dev *dev);
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+nfb_eth_promiscuous_enable(struct rte_eth_dev *dev);
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+nfb_eth_promiscuous_disable(struct rte_eth_dev *dev);
+
+/**
+ * Getter for allmulticast mode
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @return 1 if enabled 0 otherwise
+ */
+int
+nfb_eth_allmulticast_get(struct rte_eth_dev *dev);
+
+/**
+ * DPDK callback to enable allmulticast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+nfb_eth_allmulticast_enable(struct rte_eth_dev *dev);
+
+/**
+ * DPDK callback to disable allmulticast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+nfb_eth_allmulticast_disable(struct rte_eth_dev *dev);
+
+#endif /* _NFB_RXMODE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#include "nfb_stats.h"
+#include "nfb.h"
+
+int
+nfb_eth_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+ uint64_t rx_total = 0;
+ uint64_t tx_total = 0;
+ uint64_t tx_err_total = 0;
+ uint64_t rx_total_bytes = 0;
+ uint64_t tx_total_bytes = 0;
+
+ struct ndp_rx_queue *rx_queue;
+ struct ndp_tx_queue *tx_queue;
+
+ /* queue structures are allocated individually, so walk the pointer arrays */
+ for (i = 0; i < nb_rx; i++) {
+ rx_queue = dev->data->rx_queues[i];
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rx_queue->rx_pkts;
+ stats->q_ibytes[i] = rx_queue->rx_bytes;
+ }
+ rx_total += rx_queue->rx_pkts;
+ rx_total_bytes += rx_queue->rx_bytes;
+ }
+
+ for (i = 0; i < nb_tx; i++) {
+ tx_queue = dev->data->tx_queues[i];
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = tx_queue->tx_pkts;
+ stats->q_obytes[i] = tx_queue->tx_bytes;
+ }
+ tx_total += tx_queue->tx_pkts;
+ tx_total_bytes += tx_queue->tx_bytes;
+ tx_err_total += tx_queue->err_pkts;
+ }
+
+ stats->ipackets = rx_total;
+ stats->opackets = tx_total;
+ stats->ibytes = rx_total_bytes;
+ stats->obytes = tx_total_bytes;
+ stats->oerrors = tx_err_total;
+ return 0;
+}
+
+void
+nfb_eth_stats_reset(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ uint16_t nb_rx = dev->data->nb_rx_queues;
+ uint16_t nb_tx = dev->data->nb_tx_queues;
+
+ struct ndp_rx_queue *rx_queue;
+ struct ndp_tx_queue *tx_queue;
+
+ for (i = 0; i < nb_rx; i++) {
+ rx_queue = dev->data->rx_queues[i];
+ rx_queue->rx_pkts = 0;
+ rx_queue->rx_bytes = 0;
+ rx_queue->err_pkts = 0;
+ }
+ for (i = 0; i < nb_tx; i++) {
+ tx_queue = dev->data->tx_queues[i];
+ tx_queue->tx_pkts = 0;
+ tx_queue->tx_bytes = 0;
+ tx_queue->err_pkts = 0;
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#ifndef _NFB_STATS_H_
+#define _NFB_STATS_H_
+
+#include <nfb/nfb.h>
+#include <nfb/ndp.h>
+
+#include <rte_ethdev.h>
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] stats
+ * Stats structure output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled, negative errno value otherwise.
+ */
+int
+nfb_eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+nfb_eth_stats_reset(struct rte_eth_dev *dev);
+
+#endif /* _NFB_STATS_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#include "nfb_tx.h"
+#include "nfb.h"
+
+int
+nfb_eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
+{
+ struct ndp_tx_queue *txq = dev->data->tx_queues[txq_id];
+ int ret;
+
+ if (txq->queue == NULL) {
+ RTE_LOG(ERR, PMD, "RX NDP queue is NULL!\n");
+ return -EINVAL;
+ }
+
+ ret = ndp_queue_start(txq->queue);
+ if (ret != 0)
+ goto err;
+ dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+int
+nfb_eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id)
+{
+ struct ndp_tx_queue *txq = dev->data->tx_queues[txq_id];
+ int ret;
+
+ if (txq->queue == NULL) {
+ RTE_LOG(ERR, PMD, "TX NDP queue is NULL!\n");
+ return -EINVAL;
+ }
+
+ ret = ndp_queue_stop(txq->queue);
+ if (ret != 0)
+ return -EINVAL;
+ dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+int
+nfb_eth_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+ struct ndp_tx_queue *txq;
+
+ txq = rte_zmalloc_socket("ndp tx queue",
+ sizeof(struct ndp_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (txq == NULL) {
+ RTE_LOG(ERR, PMD, "rte_zmalloc_socket() failed for tx queue id "
+ "%" PRIu16 "!\n", tx_queue_id);
+ return -ENOMEM;
+ }
+
+ ret = nfb_eth_tx_queue_init(internals->nfb,
+ tx_queue_id,
+ txq);
+
+ if (ret == 0)
+ dev->data->tx_queues[tx_queue_id] = txq;
+ else
+ rte_free(txq);
+
+ return ret;
+}
+
+int
+nfb_eth_tx_queue_init(struct nfb_device *nfb,
+ uint16_t tx_queue_id,
+ struct ndp_tx_queue *txq)
+{
+ if (nfb == NULL)
+ return -EINVAL;
+
+ txq->queue = ndp_open_tx_queue(nfb, tx_queue_id);
+ if (txq->queue == NULL)
+ return -EINVAL;
+
+ txq->nfb = nfb;
+ txq->tx_queue_id = tx_queue_id;
+
+ txq->tx_pkts = 0;
+ txq->tx_bytes = 0;
+ txq->err_pkts = 0;
+
+ return 0;
+}
+
+void
+nfb_eth_tx_queue_release(void *q)
+{
+ struct ndp_tx_queue *txq = (struct ndp_tx_queue *)q;
+ if (txq->queue != NULL) {
+ ndp_close_tx_queue(txq->queue);
+ txq->queue = NULL;
+ rte_free(txq);
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Cesnet
+ * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
+ * All rights reserved.
+ */
+
+#ifndef _NFB_TX_H_
+#define _NFB_TX_H_
+
+#include <nfb/nfb.h>
+#include <nfb/ndp.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+struct ndp_tx_queue {
+ struct nfb_device *nfb; /* nfb dev structure */
+ struct ndp_queue *queue; /* tx queue */
+ uint16_t tx_queue_id; /* index */
+ volatile uint64_t tx_pkts; /* packets transmitted */
+ volatile uint64_t tx_bytes; /* bytes transmitted */
+ volatile uint64_t err_pkts; /* erroneous packets */
+};
+
+/**
+ * DPDK callback to setup a TX queue for use.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ *   TX queue index.
+ * @param desc
+ *   Number of descriptors to configure in queue.
+ * @param socket
+ *   NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ *   Thresholds parameters.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+nfb_eth_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused);
+
+/**
+ * Initialize ndp_tx_queue structure
+ *
+ * @param nfb
+ * Pointer to nfb device structure.
+ * @param tx_queue_id
+ * TX queue index.
+ * @param[out] txq
+ * Pointer to ndp_tx_queue output structure
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+nfb_eth_tx_queue_init(struct nfb_device *nfb,
+ uint16_t tx_queue_id,
+ struct ndp_tx_queue *txq);
+
+/**
+ * DPDK callback to release a TX queue.
+ *
+ * @param dpdk_txq
+ *   Generic TX queue pointer.
+ */
+void
+nfb_eth_tx_queue_release(void *q);
+
+/**
+ * Start traffic on Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param txq_id
+ * TX queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+nfb_eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id);
+
+/**
+ * Stop traffic on Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param txq_id
+ * TX queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+nfb_eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id);
+
+/**
+ * DPDK callback for TX.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param bufs
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= nb_pkts).
+ */
+static __rte_always_inline uint16_t
+nfb_eth_ndp_tx(void *queue,
+ struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ int i;
+ struct rte_mbuf *mbuf;
+ struct ndp_tx_queue *ndp = queue;
+ uint16_t num_tx = 0;
+ uint64_t num_bytes = 0;
+
+ void *dst;
+ uint32_t pkt_len;
+ uint8_t mbuf_segs;
+
+ struct ndp_packet packets[nb_pkts];
+
+ if (unlikely(ndp->queue == NULL || nb_pkts == 0)) {
+ RTE_LOG(ERR, PMD, "TX invalid arguments!\n");
+ return 0;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ packets[i].data_length = bufs[i]->pkt_len;
+ packets[i].header_length = 0;
+ }
+
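+ /*
+ * Reserve TX buffer space for nb_pkts packets of the requested lengths;
+ * on success each packets[i].data points into the reserved buffer.
+ */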
+ num_tx = ndp_tx_burst_get(ndp->queue, packets, nb_pkts);
+
+ if (unlikely(num_tx != nb_pkts))
+ return 0;
+
+ for (i = 0; i < nb_pkts; ++i) {
+ mbuf = bufs[i];
+
+ pkt_len = mbuf->pkt_len;
+ mbuf_segs = mbuf->nb_segs;
+
+ num_bytes += pkt_len;
+ if (mbuf_segs == 1) {
+ /*
+ * non-scattered packet,
+ * transmit from one mbuf
+ */
+ rte_memcpy(packets[i].data,
+ rte_pktmbuf_mtod(mbuf, const void *),
+ pkt_len);
+ } else {
+ /* scattered packet, copy segments one after another into the NDP buffer */
+ struct rte_mbuf *m = mbuf;
+ dst = packets[i].data;
+ while (m) {
+ rte_memcpy(dst,
+ rte_pktmbuf_mtod(m,
+ const void *),
+ m->data_len);
+ dst = ((uint8_t *)(dst)) +
+ m->data_len;
+ m = m->next;
+ }
+ }
+
+ rte_pktmbuf_free(mbuf);
+ }
+
+ ndp_tx_burst_flush(ndp->queue);
+
+ ndp->tx_pkts += num_tx;
+ ndp->err_pkts += nb_pkts - num_tx;
+ ndp->tx_bytes += num_bytes;
+ return num_tx;
+}
+
+#endif /* _NFB_TX_H_ */
--- /dev/null
+DPDK_19.05 {
+
+ local: *;
+};
endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += -lrte_pmd_sfc_efx
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += -lrte_pmd_szedata2 -lsze2
+_LDLIBS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += -lrte_pmd_nfb
+_LDLIBS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --libs netcope-common)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += -lrte_pmd_tap
_LDLIBS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += -lrte_pmd_thunderx_nicvf
_LDLIBS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += -lrte_pmd_vdev_netvsc