-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2014-2018 Netronome Systems, Inc.
* All rights reserved.
*
* Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
/*
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <errno.h>
/* Prototypes */
-static void nfp_net_close(struct rte_eth_dev *dev);
+static int nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
static void nfp_net_dev_interrupt_handler(void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
+static int nfp_pf_init(struct rte_eth_dev *eth_dev);
+static int nfp_pci_uninit(struct rte_eth_dev *eth_dev);
+static int nfp_init_phyports(struct nfp_pf_dev *pf_dev);
static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
-static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
+static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
+static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
uint16_t queue_idx);
static int nfp_net_start(struct rte_eth_dev *dev);
static int nfp_net_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
-static void nfp_net_stats_reset(struct rte_eth_dev *dev);
-static void nfp_net_stop(struct rte_eth_dev *dev);
+static int nfp_net_stats_reset(struct rte_eth_dev *dev);
+static int nfp_net_stop(struct rte_eth_dev *dev);
static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
struct rte_eth_rss_conf *rss_conf);
static int nfp_set_mac_addr(struct rte_eth_dev *dev,
struct rte_ether_addr *mac_addr);
+static int32_t nfp_cpp_bridge_service_func(void *args);
+static int nfp_fw_setup(struct rte_pci_device *dev,
+ struct nfp_cpp *cpp,
+ struct nfp_eth_table *nfp_eth_table,
+ struct nfp_hwinfo *hwinfo);
+
/* The offset of the queue controller queues in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
rxmode = &dev_conf->rxmode;
txmode = &dev_conf->txmode;
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* Checking TX mode */
if (txmode->mq_mode) {
PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
}
static int
-nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
+nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port)
{
struct nfp_eth_table *nfp_eth_table;
+ struct nfp_net_hw *hw = NULL;
+
+ /* Grab a pointer to the correct physical port */
+ hw = pf_dev->ports[port];
+
+ nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
- nfp_eth_table = nfp_eth_read_ports(hw->cpp);
- /*
- * hw points to port0 private data. We need hw now pointing to
- * right port.
- */
- hw += port;
nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
(uint8_t *)&nfp_eth_table->ports[port].mac_addr);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
+ struct nfp_pf_dev *pf_dev;
struct rte_eth_conf *dev_conf;
struct rte_eth_rxmode *rxmode;
uint32_t intr_vector;
int ret;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
PMD_INIT_LOG(DEBUG, "Start");
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0) {
- if (hw->pf_multiport_enabled) {
+ if (pf_dev->multiport) {
PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
"with NFP multiport PF");
return -EINVAL;
goto error;
}
- if (hw->is_pf) {
+ if (hw->is_phyport) {
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
/* Configure the physical port up */
- nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
+ nfp_eth_set_configured(hw->cpp, hw->idx, 1);
else
nfp_eth_set_configured(dev->process_private,
- hw->pf_port_idx, 1);
+ hw->idx, 1);
}
hw->ctrl = new_ctrl;
}
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
-static void
+static int
nfp_net_stop(struct rte_eth_dev *dev)
{
int i;
(struct nfp_net_rxq *)dev->data->rx_queues[i]);
}
- if (hw->is_pf) {
+ if (hw->is_phyport) {
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
/* Configure the physical port down */
- nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
+ nfp_eth_set_configured(hw->cpp, hw->idx, 0);
else
nfp_eth_set_configured(dev->process_private,
- hw->pf_port_idx, 0);
+ hw->idx, 0);
}
+
+ return 0;
}
/* Set the link up. */
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!hw->is_pf)
+ if (!hw->is_phyport)
return -ENOTSUP;
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
/* Configure the physical port down */
- return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
+ return nfp_eth_set_configured(hw->cpp, hw->idx, 1);
else
return nfp_eth_set_configured(dev->process_private,
- hw->pf_port_idx, 1);
+ hw->idx, 1);
}
/* Set the link down. */
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!hw->is_pf)
+ if (!hw->is_phyport)
return -ENOTSUP;
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
/* Configure the physical port down */
- return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
+ return nfp_eth_set_configured(hw->cpp, hw->idx, 0);
else
return nfp_eth_set_configured(dev->process_private,
- hw->pf_port_idx, 0);
+ hw->idx, 0);
}
/* Reset and stop device. The device can not be restarted. */
-static void
+static int
nfp_net_close(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
struct rte_pci_device *pci_dev;
int i;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
PMD_INIT_LOG(DEBUG, "Close");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
(struct nfp_net_rxq *)dev->data->rx_queues[i]);
}
+ /* Only free PF resources after all physical ports have been closed */
+ if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC ||
+ pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) {
+ struct nfp_pf_dev *pf_dev;
+ pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* Mark this port as unused and free device priv resources */
+ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
+ pf_dev->ports[hw->idx] = NULL;
+ rte_eth_dev_release_port(dev);
+
+ for (i = 0; i < pf_dev->total_phyports; i++) {
+ /* Check to see if ports are still in use */
+ if (pf_dev->ports[i])
+ return 0;
+ }
+
+ /* Now it is safe to free all PF resources */
+ PMD_INIT_LOG(INFO, "Freeing PF resources");
+ nfp_cpp_area_free(pf_dev->ctrl_area);
+ nfp_cpp_area_free(pf_dev->hwqueues_area);
+ free(pf_dev->hwinfo);
+ free(pf_dev->sym_tbl);
+ nfp_cpp_free(pf_dev->cpp);
+ rte_free(pf_dev);
+ }
+
rte_intr_disable(&pci_dev->intr_handle);
- nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
/* unregister callback func from eal lib */
rte_intr_callback_unregister(&pci_dev->intr_handle,
* The ixgbe PMD driver disables the pcie master on the
* device. The i40e does not...
*/
+
+ return 0;
}
-static void
+static int
nfp_net_promisc_enable(struct rte_eth_dev *dev)
{
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
+ int ret;
PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
- return;
+ return -ENOTSUP;
}
if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
- return;
+ return 0;
}
new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
* DPDK sets promiscuous mode on just after this call assuming
* it can not fail ...
*/
- if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
- return;
+ ret = nfp_net_reconfig(hw, new_ctrl, update);
+ if (ret < 0)
+ return ret;
hw->ctrl = new_ctrl;
+
+ return 0;
}
-static void
+static int
nfp_net_promisc_disable(struct rte_eth_dev *dev)
{
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
+ int ret;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
- return;
+ return 0;
}
new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
* DPDK sets promiscuous mode off just before this call
* assuming it can not fail ...
*/
- if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
- return;
+ ret = nfp_net_reconfig(hw, new_ctrl, update);
+ if (ret < 0)
+ return ret;
hw->ctrl = new_ctrl;
+
+ return 0;
}
/*
return -EINVAL;
}
-static void
+static int
nfp_net_stats_reset(struct rte_eth_dev *dev)
{
int i;
hw->eth_stats_base.imissed =
nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+
+ return 0;
}
static int
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_RSS_HASH;
if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
};
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = NFP_NET_MAX_RX_DESC,
+ .nb_min = NFP_NET_MIN_RX_DESC,
+ .nb_align = NFP_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = NFP_NET_MAX_TX_DESC,
+ .nb_min = NFP_NET_MIN_TX_DESC,
+ .nb_align = NFP_ALIGN_RING_DESC,
+ .nb_seg_max = NFP_TX_MAX_SEG,
+ .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
+ };
+
dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
ETH_RSS_NONFRAG_IPV4_TCP |
ETH_RSS_NONFRAG_IPV4_UDP |
PMD_DRV_LOG(INFO, " Port %d: Link Down",
dev->data->port_id);
- PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
- pci_dev->addr.domain, pci_dev->addr.bus,
- pci_dev->addr.devid, pci_dev->addr.function);
+ PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
}
/* Interrupt configuration and handling */
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
nfp_net_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
nfp_net_dev_link_status_print(dev);
}
/* switch to jumbo mode if needed */
- if ((uint32_t)mtu > RTE_ETHER_MAX_LEN)
+ if ((uint32_t)mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
const struct rte_memzone *tz;
struct nfp_net_rxq *rxq;
struct nfp_net_hw *hw;
+ uint32_t rx_desc_sz;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
/* Validating number of descriptors */
- if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
- (nb_desc > NFP_NET_MAX_RX_DESC) ||
- (nb_desc < NFP_NET_MIN_RX_DESC)) {
+ rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
+ if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
+ nb_desc > NFP_NET_MAX_RX_DESC ||
+ nb_desc < NFP_NET_MIN_RX_DESC) {
PMD_DRV_LOG(ERR, "Wrong nb_desc value");
return -EINVAL;
}
struct nfp_net_txq *txq;
uint16_t tx_free_thresh;
struct nfp_net_hw *hw;
+ uint32_t tx_desc_sz;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
/* Validating number of descriptors */
- if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
- (nb_desc > NFP_NET_MAX_TX_DESC) ||
- (nb_desc < NFP_NET_MIN_TX_DESC)) {
+ tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
+ if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
+ nb_desc > NFP_NET_MAX_TX_DESC ||
+ nb_desc < NFP_NET_MIN_TX_DESC) {
PMD_DRV_LOG(ERR, "Wrong nb_desc value");
return -EINVAL;
}
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
new_ctrl = 0;
- if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
- (mask & ETH_VLAN_EXTEND_OFFLOAD))
- PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or"
- " ETH_VLAN_EXTEND_OFFLOAD");
-
/* Enable vlan strip if it is not configured yet */
if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
!(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+ /* Propagate current RSS hash functions to caller */
+ rss_conf->rss_hf = rss_hf;
+
/* Reading the key size */
rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
.rx_queue_setup = nfp_net_rx_queue_setup,
.rx_queue_release = nfp_net_rx_queue_release,
- .rx_queue_count = nfp_net_rx_queue_count,
.tx_queue_setup = nfp_net_tx_queue_setup,
.tx_queue_release = nfp_net_tx_queue_release,
.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
-/*
- * All eth_dev created got its private data, but before nfp_net_init, that
- * private data is referencing private data for all the PF ports. This is due
- * to how the vNIC bars are mapped based on first port, so all ports need info
- * about port 0 private data. Inside nfp_net_init the private data pointer is
- * changed to the right address for each port once the bars have been mapped.
- *
- * This functions helps to find out which port and therefore which offset
- * inside the private data array to use.
- */
-static int
-get_pf_port_number(char *name)
-{
- char *pf_str = name;
- int size = 0;
-
- while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
- pf_str++;
-
- if (size == 30)
- /*
- * This should not happen at all and it would mean major
- * implementation fault.
- */
- rte_panic("nfp_net: problem with pf device name\n");
-
- /* Expecting _portX with X within [0,7] */
- pf_str += 5;
-
- return (int)strtol(pf_str, NULL, 10);
-}
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
- struct nfp_net_hw *hw, *hwport0;
+ struct nfp_pf_dev *pf_dev;
+ struct nfp_net_hw *hw;
uint64_t tx_bar_off = 0, rx_bar_off = 0;
uint32_t start_q;
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ /* Use backpointer here to the PF of this eth_dev */
+ pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);
+
/* NFP can not handle DMA addresses requiring more than 40 bits */
if (rte_mem_check_dma_mask(40)) {
RTE_LOG(ERR, PMD, "device %s can not be used:",
if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
(pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
- port = get_pf_port_number(eth_dev->data->name);
+ port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
if (port < 0 || port > 7) {
PMD_DRV_LOG(ERR, "Port value is wrong");
return -ENODEV;
}
- PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port);
+ /* This points to the specific port private data */
+ PMD_INIT_LOG(DEBUG, "Working with physical port number %d",
+ port);
- /* This points to port 0 private data */
- hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ /* Use PF array of physical ports to get pointer to
+ * this specific port
+ */
+ hw = pf_dev->ports[port];
- /* This points to the specific port private data */
- hw = &hwport0[port];
} else {
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- hwport0 = 0;
}
eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+ eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
return -ENODEV;
}
- if (hw->is_pf && port == 0) {
- hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
- hw->total_ports * 32768,
- &hw->ctrl_area);
- if (!hw->ctrl_bar) {
- printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar");
- return -EIO;
+ if (hw->is_phyport) {
+ if (port == 0) {
+ hw->ctrl_bar = pf_dev->ctrl_bar;
+ } else {
+ if (!pf_dev->ctrl_bar)
+ return -ENODEV;
+ /* Use port offset in pf ctrl_bar for this
+ * port's control bar
+ */
+ hw->ctrl_bar = pf_dev->ctrl_bar +
+ (port * NFP_PF_CSR_SLICE_SIZE);
}
-
- PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
- }
-
- if (port > 0) {
- if (!hwport0->ctrl_bar)
- return -ENODEV;
-
- /* address based on port0 offset */
- hw->ctrl_bar = hwport0->ctrl_bar +
- (port * NFP_PF_CSR_SLICE_SIZE);
}
PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
- if (hw->is_pf && port == 0) {
- /* configure access to tx/rx vNIC BARs */
- hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
- NFP_PCIE_QUEUE(0),
- NFP_QCP_QUEUE_AREA_SZ,
- &hw->hwqueues_area);
-
- if (!hwport0->hw_queues) {
- printf("nfp_rtsym_map fails for net.qc");
- err = -EIO;
- goto dev_err_ctrl_map;
- }
-
- PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p",
- hwport0->hw_queues);
- }
-
- if (hw->is_pf) {
- hw->tx_bar = hwport0->hw_queues + tx_bar_off;
- hw->rx_bar = hwport0->hw_queues + rx_bar_off;
+ if (hw->is_phyport) {
+ hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
+ hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
eth_dev->data->dev_private = hw;
} else {
hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
goto dev_err_queues_map;
}
- if (hw->is_pf) {
- nfp_net_pf_read_mac(hwport0, port);
+ if (hw->is_phyport) {
+ nfp_net_pf_read_mac(pf_dev, port);
nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
} else {
nfp_net_vf_read_mac(hw);
if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
"mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
size_t count, curlen, totlen = 0;
int err = 0;
- PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
+ PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
sizeof(off_t), sizeof(size_t));
/* Reading the count param */
cpp_id = (offset >> 40) << 8;
nfp_offset = offset & ((1ull << 40) - 1);
- PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
+ PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
offset);
- PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
+ PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
cpp_id, nfp_offset);
/* Adjust length if not aligned */
if (len > sizeof(tmpbuf))
len = sizeof(tmpbuf);
- PMD_CPP_LOG(DEBUG, "%s: Receive %u of %lu\n", __func__,
+ PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__,
len, count);
err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
if (err != (int)len) {
RTE_LOG(ERR, PMD,
- "%s: error when receiving, %d of %lu\n",
+ "%s: error when receiving, %d of %zu\n",
__func__, err, count);
nfp_cpp_area_release(area);
nfp_cpp_area_free(area);
size_t count, curlen, totlen = 0;
int err = 0;
- PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
+ PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
sizeof(off_t), sizeof(size_t));
/* Reading the count param */
cpp_id = (offset >> 40) << 8;
nfp_offset = offset & ((1ull << 40) - 1);
- PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
+ PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
offset);
- PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
+ PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
cpp_id, nfp_offset);
/* Adjust length if not aligned */
nfp_cpp_area_free(area);
return -EIO;
}
- PMD_CPP_LOG(DEBUG, "%s: sending %u of %lu\n", __func__,
+ PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__,
len, count);
err = send(sockfd, tmpbuf, len, 0);
if (err != (int)len) {
RTE_LOG(ERR, PMD,
- "%s: error when sending: %d of %lu\n",
+ "%s: error when sending: %d of %zu\n",
__func__, err, count);
nfp_cpp_area_release(area);
nfp_cpp_area_free(area);
return 0;
}
-static int
-nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
- struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
- int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
-{
- struct rte_eth_dev *eth_dev;
- struct nfp_net_hw *hw = NULL;
- char *port_name;
- struct rte_service_spec service;
- int retval;
-
- port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
- if (!port_name)
- return -ENOMEM;
-
- if (ports > 1)
- snprintf(port_name, 100, "%s_port%d", dev->device.name, port);
- else
- strlcat(port_name, dev->device.name, 100);
-
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- eth_dev = rte_eth_dev_allocate(port_name);
- if (!eth_dev) {
- rte_free(port_name);
- return -ENODEV;
- }
- if (port == 0) {
- *priv = rte_zmalloc(port_name,
- sizeof(struct nfp_net_adapter) *
- ports, RTE_CACHE_LINE_SIZE);
- if (!*priv) {
- rte_free(port_name);
- rte_eth_dev_release_port(eth_dev);
- return -ENOMEM;
- }
- }
- eth_dev->data->dev_private = *priv;
-
- /*
- * dev_private pointing to port0 dev_private because we need
- * to configure vNIC bars based on port0 at nfp_net_init.
- * Then dev_private is adjusted per port.
- */
- hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
- hw->cpp = cpp;
- hw->hwinfo = hwinfo;
- hw->sym_tbl = sym_tbl;
- hw->pf_port_idx = phys_port;
- hw->is_pf = 1;
- if (ports > 1)
- hw->pf_multiport_enabled = 1;
-
- hw->total_ports = ports;
- } else {
- eth_dev = rte_eth_dev_attach_secondary(port_name);
- if (!eth_dev) {
- RTE_LOG(ERR, EAL, "secondary process attach failed, "
- "ethdev doesn't exist");
- rte_free(port_name);
- return -ENODEV;
- }
- eth_dev->process_private = cpp;
- }
-
- eth_dev->device = &dev->device;
- rte_eth_copy_pci_info(eth_dev, dev);
-
- retval = nfp_net_init(eth_dev);
-
- if (retval) {
- retval = -ENODEV;
- goto probe_failed;
- } else {
- rte_eth_dev_probing_finish(eth_dev);
- }
-
- rte_free(port_name);
-
- if (port == 0) {
- /*
- * The rte_service needs to be created just once per PMD.
- * And the cpp handler needs to be linked to the service.
- * Secondary processes will be used for debugging DPDK apps
- * when requiring to use the CPP interface for accessing NFP
- * components. And the cpp handler for secondary processes is
- * available at this point.
- */
- memset(&service, 0, sizeof(struct rte_service_spec));
- snprintf(service.name, sizeof(service.name), "nfp_cpp_service");
- service.callback = nfp_cpp_bridge_service_func;
- service.callback_userdata = (void *)cpp;
-
- hw = (struct nfp_net_hw *)(eth_dev->data->dev_private);
-
- if (rte_service_component_register(&service,
- &hw->nfp_cpp_service_id))
- RTE_LOG(ERR, PMD, "NFP CPP bridge service register() failed");
- else
- RTE_LOG(DEBUG, PMD, "NFP CPP bridge service registered");
- }
-
- return retval;
-
-probe_failed:
- rte_free(port_name);
- /* free ports private data if primary process */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
-
- rte_eth_dev_release_port(eth_dev);
-
- return retval;
-}
-
#define DEFAULT_FW_PATH "/lib/firmware/netronome"
static int
return err;
}
-static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *dev)
+static int nfp_init_phyports(struct nfp_pf_dev *pf_dev)
+{
+ struct nfp_net_hw *hw;
+ struct rte_eth_dev *eth_dev;
+ int ret = 0;
+ int i;
+
+ /* Loop through all physical ports on PF */
+ for (i = 0; i < pf_dev->total_phyports; i++) {
+ const unsigned int numa_node = rte_socket_id();
+ char port_name[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(port_name, sizeof(port_name), "%s_port%d",
+ pf_dev->pci_dev->device.name, i);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(port_name);
+ if (!eth_dev) {
+ RTE_LOG(ERR, EAL,
+ "secondary process attach failed, "
+ "ethdev doesn't exist");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ eth_dev->process_private = pf_dev->cpp;
+ goto nfp_net_init;
+ }
+
+ /* First port has already been initialized */
+ if (i == 0) {
+ eth_dev = pf_dev->eth_dev;
+ goto skip_dev_alloc;
+ }
+
+ /* Allocate a eth_dev for remaining ports */
+ eth_dev = rte_eth_dev_allocate(port_name);
+ if (!eth_dev) {
+ ret = -ENODEV;
+ goto port_cleanup;
+ }
+
+ /* Allocate memory for remaining ports */
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private) {
+ ret = -ENOMEM;
+ rte_eth_dev_release_port(eth_dev);
+ goto port_cleanup;
+ }
+
+skip_dev_alloc:
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ /* Add this device to the PF's array of physical ports */
+ pf_dev->ports[i] = hw;
+
+ hw->pf_dev = pf_dev;
+ hw->cpp = pf_dev->cpp;
+ hw->eth_dev = eth_dev;
+ hw->idx = i;
+ hw->is_phyport = true;
+
+nfp_net_init:
+ eth_dev->device = &pf_dev->pci_dev->device;
+
+ /* ctrl/tx/rx BAR mappings and remaining init happen in
+ * nfp_net_init
+ */
+ ret = nfp_net_init(eth_dev);
+
+ if (ret) {
+ ret = -ENODEV;
+ goto port_cleanup;
+ }
+
+ rte_eth_dev_probing_finish(eth_dev);
+
+ } /* End loop, all ports on this PF */
+ return 0;
+
+port_cleanup:
+ for (i = 0; i < pf_dev->total_phyports; i++) {
+ if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) {
+ struct rte_eth_dev *tmp_dev;
+ tmp_dev = pf_dev->ports[i]->eth_dev;
+ rte_eth_dev_release_port(tmp_dev);
+ pf_dev->ports[i] = NULL;
+ }
+ }
+error:
+ return ret;
+}
+
+static int nfp_pf_init(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw = NULL;
+ struct nfp_pf_dev *pf_dev = NULL;
struct nfp_cpp *cpp;
struct nfp_hwinfo *hwinfo;
struct nfp_rtsym_table *sym_tbl;
struct nfp_eth_table *nfp_eth_table = NULL;
+ struct rte_service_spec service;
+ char name[RTE_ETH_NAME_MAX_LEN];
int total_ports;
- void *priv = 0;
int ret = -ENODEV;
int err;
- int i;
- if (!dev)
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev);
+
+ if (!pci_dev)
return ret;
/*
* interface. Here we avoid this telling to the CPP init code to
* use a lock file if UIO is being used.
*/
- if (dev->kdrv == RTE_KDRV_VFIO)
- cpp = nfp_cpp_from_device_name(dev, 0);
+ if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
+ cpp = nfp_cpp_from_device_name(pci_dev, 0);
else
- cpp = nfp_cpp_from_device_name(dev, 1);
+ cpp = nfp_cpp_from_device_name(pci_dev, 1);
if (!cpp) {
- PMD_DRV_LOG(ERR, "A CPP handle can not be obtained");
+ PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
ret = -EIO;
goto error;
}
hwinfo = nfp_hwinfo_read(cpp);
if (!hwinfo) {
- PMD_DRV_LOG(ERR, "Error reading hwinfo table");
- return -EIO;
+ PMD_INIT_LOG(ERR, "Error reading hwinfo table");
+ ret = -EIO;
+ goto error;
}
nfp_eth_table = nfp_eth_read_ports(cpp);
if (!nfp_eth_table) {
- PMD_DRV_LOG(ERR, "Error reading NFP ethernet table");
- return -EIO;
+ PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
+ ret = -EIO;
+ goto hwinfo_cleanup;
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
- PMD_DRV_LOG(INFO, "Error when uploading firmware");
+ if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
+ PMD_INIT_LOG(ERR, "Error when uploading firmware");
ret = -EIO;
- goto error;
+ goto eth_table_cleanup;
}
}
/* Now the symbol table should be there */
sym_tbl = nfp_rtsym_table_read(cpp);
if (!sym_tbl) {
- PMD_DRV_LOG(ERR, "Something is wrong with the firmware"
+ PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
" symbol table");
ret = -EIO;
- goto error;
+ goto eth_table_cleanup;
}
total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
if (total_ports != (int)nfp_eth_table->count) {
PMD_DRV_LOG(ERR, "Inconsistent number of ports");
ret = -EIO;
- goto error;
+ goto sym_tbl_cleanup;
}
- PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports);
+
+ PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports);
if (total_ports <= 0 || total_ports > 8) {
- PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
+ PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
ret = -ENODEV;
- goto error;
+ goto sym_tbl_cleanup;
+ }
+ /* Allocate memory for the PF "device" */
+ snprintf(name, sizeof(name), "nfp_pf%d", eth_dev->data->port_id);
+ pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
+ if (!pf_dev) {
+ ret = -ENOMEM;
+ goto sym_tbl_cleanup;
}
- for (i = 0; i < total_ports; i++) {
- ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
- nfp_eth_table->ports[i].index,
- sym_tbl, &priv);
- if (ret)
- break;
+ /* Populate the newly created PF device */
+ pf_dev->cpp = cpp;
+ pf_dev->hwinfo = hwinfo;
+ pf_dev->sym_tbl = sym_tbl;
+ pf_dev->total_phyports = total_ports;
+
+ if (total_ports > 1)
+ pf_dev->multiport = true;
+
+ pf_dev->pci_dev = pci_dev;
+
+ /* The first eth_dev is part of the PF struct */
+ pf_dev->eth_dev = eth_dev;
+
+ /* Map the symbol table */
+ pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
+ pf_dev->total_phyports * 32768,
+ &pf_dev->ctrl_area);
+ if (!pf_dev->ctrl_bar) {
+ PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar");
+ ret = -EIO;
+ goto pf_cleanup;
}
-error:
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);
+
+ /* configure access to tx/rx vNIC BARs */
+ pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
+ NFP_PCIE_QUEUE(0),
+ NFP_QCP_QUEUE_AREA_SZ,
+ &pf_dev->hwqueues_area);
+ if (!pf_dev->hw_queues) {
+ PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
+ ret = -EIO;
+ goto ctrl_area_cleanup;
+ }
+
+ PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);
+
+ /* Initialize and prep physical ports now
+ * This will loop through all physical ports
+ */
+ ret = nfp_init_phyports(pf_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Could not create physical ports");
+ goto hwqueues_cleanup;
+ }
+
+ /*
+ * The rte_service needs to be created just once per PMD.
+ * And the cpp handler needs to be linked to the service.
+ * Secondary processes will be used for debugging DPDK apps
+ * when requiring to use the CPP interface for accessing NFP
+ * components. And the cpp handler for secondary processes is
+ * available at this point.
+ */
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "nfp_cpp_service");
+ service.callback = nfp_cpp_bridge_service_func;
+ service.callback_userdata = (void *)cpp;
+
+ if (rte_service_component_register(&service,
+ &hw->nfp_cpp_service_id))
+ RTE_LOG(ERR, PMD, "NFP CPP bridge service register() failed");
+ else
+ RTE_LOG(DEBUG, PMD, "NFP CPP bridge service registered");
+
+ return 0;
+
+hwqueues_cleanup:
+ nfp_cpp_area_free(pf_dev->hwqueues_area);
+ctrl_area_cleanup:
+ nfp_cpp_area_free(pf_dev->ctrl_area);
+pf_cleanup:
+ rte_free(pf_dev);
+sym_tbl_cleanup:
+ free(sym_tbl);
+eth_table_cleanup:
free(nfp_eth_table);
+hwinfo_cleanup:
+ free(hwinfo);
+error:
return ret;
}
-int nfp_logtype_init;
-int nfp_logtype_driver;
+static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *dev)
+{
+ return rte_eth_dev_pci_generic_probe(dev,
+ sizeof(struct nfp_net_hw), nfp_pf_init);
+}
static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
{
},
};
+static int nfp_pci_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ uint16_t port_id;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC ||
+ pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) {
+ /* Free up all physical ports under PF */
+ RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
+ rte_eth_dev_close(port_id);
+ /*
+ * Ports can be closed and freed but hotplugging is not
+ * currently supported
+ */
+ return -ENOTSUP;
+ }
+
+ /* VF cleanup, just free private port data */
+ return nfp_net_close(eth_dev);
+}
+
static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
- struct rte_eth_dev *eth_dev;
- struct nfp_net_hw *hw, *hwport0;
- int port = 0;
-
- eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
- if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
- (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
- port = get_pf_port_number(eth_dev->data->name);
- /*
- * hotplug is not possible with multiport PF although freeing
- * data structures can be done for first port.
- */
- if (port != 0)
- return -ENOTSUP;
- hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- hw = &hwport0[port];
- nfp_cpp_area_free(hw->ctrl_area);
- nfp_cpp_area_free(hw->hwqueues_area);
- free(hw->hwinfo);
- free(hw->sym_tbl);
- nfp_cpp_free(hw->cpp);
- } else {
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- }
- /* hotplug is not possible with multiport PF */
- if (hw->pf_multiport_enabled)
- return -ENOTSUP;
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
}
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
-
-RTE_INIT(nfp_init_log)
-{
- nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
- if (nfp_logtype_init >= 0)
- rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
- nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
- if (nfp_logtype_driver >= 0)
- rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(nfp_logtype_init, pmd.net.nfp.init, NOTICE);
+RTE_LOG_REGISTER(nfp_logtype_driver, pmd.net.nfp.driver, NOTICE);
/*
* Local variables:
* c-file-style: "Linux"