X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=a46c4dd1cc0c8eb02e7147a89c6a2b54ce186a0a;hb=7f8e73201dae6e605df6a9cdc24d9004b2590424;hp=82e3e4e19ff8d78807ea613875af02be7b10462f;hpb=cb6696d22023efad238709239792ec66b0920017;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 82e3e4e19f..a46c4dd1cc 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1,34 +1,8 @@
-/*
- * Copyright (c) 2014, 2015 Netronome Systems, Inc.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Netronome Systems, Inc.
  * All rights reserved.
  *
  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *  this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *  notice, this list of conditions and the following disclaimer in the
- *  documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- *  contributors may be used to endorse or promote products derived from this
- *  software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/ /* @@ -39,13 +13,12 @@ * Netronome vNIC DPDK Poll-Mode Driver: Main entry point */ -#include - #include #include #include #include -#include +#include +#include #include #include #include @@ -55,301 +28,44 @@ #include #include #include +#include + +#include "eal_firmware.h" + +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfpcore/nfp_hwinfo.h" +#include "nfpcore/nfp_mip.h" +#include "nfpcore/nfp_rtsym.h" +#include "nfpcore/nfp_nsp.h" #include "nfp_net_pmd.h" +#include "nfp_rxtx.h" #include "nfp_net_logs.h" #include "nfp_net_ctrl.h" +#include "nfp_cpp_bridge.h" + +#include +#include +#include +#include +#include +#include +#include /* Prototypes */ -static void nfp_net_close(struct rte_eth_dev *dev); -static int nfp_net_configure(struct rte_eth_dev *dev); -static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle, - void *param); -static void nfp_net_dev_interrupt_delayed_handler(void *param); -static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -static void nfp_net_infos_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info); +static int nfp_net_close(struct rte_eth_dev *dev); static int nfp_net_init(struct rte_eth_dev *eth_dev); -static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete); -static void nfp_net_promisc_enable(struct rte_eth_dev *dev); -static void nfp_net_promisc_disable(struct rte_eth_dev *dev); -static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq); -static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev, - uint16_t queue_idx); -static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -static void nfp_net_rx_queue_release(void *rxq); -static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, - uint16_t nb_desc, unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mp); -static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq); -static void nfp_net_tx_queue_release(void *txq); -static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, - uint16_t nb_desc, unsigned int socket_id, - const struct rte_eth_txconf *tx_conf); -static int nfp_net_start(struct rte_eth_dev *dev); -static void nfp_net_stats_get(struct rte_eth_dev *dev, - struct rte_eth_stats *stats); -static void nfp_net_stats_reset(struct rte_eth_dev *dev); -static void nfp_net_stop(struct rte_eth_dev *dev); -static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); - -/* - * The offset of the queue controller queues in the PCIe Target. These - * happen to be at the same offset on the NFP6000 and the NFP3200 so - * we use a single macro here. - */ -#define NFP_PCIE_QUEUE(_q) (0x80000 + (0x800 * ((_q) & 0xff))) - -/* Maximum value which can be added to a queue with one transaction */ -#define NFP_QCP_MAX_ADD 0x7f - -#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \ - (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM) - -/* nfp_qcp_ptr - Read or Write Pointer of a queue */ -enum nfp_qcp_ptr { - NFP_QCP_READ_PTR = 0, - NFP_QCP_WRITE_PTR -}; - -/* - * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue - * @q: Base address for queue structure - * @ptr: Add to the Read or Write pointer - * @val: Value to add to the queue pointer - * - * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed. 
- */ -static inline void -nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val) -{ - uint32_t off; - - if (ptr == NFP_QCP_READ_PTR) - off = NFP_QCP_QUEUE_ADD_RPTR; - else - off = NFP_QCP_QUEUE_ADD_WPTR; - - while (val > NFP_QCP_MAX_ADD) { - nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off); - val -= NFP_QCP_MAX_ADD; - } - - nn_writel(rte_cpu_to_le_32(val), q + off); -} - -/* - * nfp_qcp_read - Read the current Read/Write pointer value for a queue - * @q: Base address for queue structure - * @ptr: Read or Write pointer - */ -static inline uint32_t -nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr) -{ - uint32_t off; - uint32_t val; - - if (ptr == NFP_QCP_READ_PTR) - off = NFP_QCP_QUEUE_STS_LO; - else - off = NFP_QCP_QUEUE_STS_HI; - - val = rte_cpu_to_le_32(nn_readl(q + off)); - - if (ptr == NFP_QCP_READ_PTR) - return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask; - else - return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask; -} - -/* - * Functions to read/write from/to Config BAR - * Performs any endian conversion necessary. - */ -static inline uint8_t -nn_cfg_readb(struct nfp_net_hw *hw, int off) -{ - return nn_readb(hw->ctrl_bar + off); -} - -static inline void -nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val) -{ - nn_writeb(val, hw->ctrl_bar + off); -} - -static inline uint32_t -nn_cfg_readl(struct nfp_net_hw *hw, int off) -{ - return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off)); -} - -static inline void -nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val) -{ - nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off); -} - -static inline uint64_t -nn_cfg_readq(struct nfp_net_hw *hw, int off) -{ - return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off)); -} - -static inline void -nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val) -{ - nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off); -} - -/* Creating memzone for hardware rings. */ -static const struct rte_memzone * -ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, - uint16_t queue_id, uint32_t ring_size, int socket_id) -{ - char z_name[RTE_MEMZONE_NAMESIZE]; - const struct rte_memzone *mz; - - snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - dev->driver->pci_drv.name, - ring_name, dev->data->port_id, queue_id); - - mz = rte_memzone_lookup(z_name); - if (mz) - return mz; - - return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, - NFP_MEMZONE_ALIGN); -} - -/* - * Atomically reads link status information from global structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &dev->data->dev_link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/* - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
- */ -static inline int -nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -static void -nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq) -{ - unsigned i; - - if (rxq->rxbufs == NULL) - return; - - for (i = 0; i < rxq->rx_count; i++) { - if (rxq->rxbufs[i].mbuf) { - rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf); - rxq->rxbufs[i].mbuf = NULL; - } - } -} - -static void -nfp_net_rx_queue_release(void *rx_queue) -{ - struct nfp_net_rxq *rxq = rx_queue; - - if (rxq) { - nfp_net_rx_queue_release_mbufs(rxq); - rte_free(rxq->rxbufs); - rte_free(rxq); - } -} - -static void -nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq) -{ - nfp_net_rx_queue_release_mbufs(rxq); - rxq->wr_p = 0; - rxq->rd_p = 0; - rxq->nb_rx_hold = 0; -} - -static void -nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) -{ - unsigned i; - - if (txq->txbufs == NULL) - return; - - for (i = 0; i < txq->tx_count; i++) { - if (txq->txbufs[i].mbuf) { - rte_pktmbuf_free(txq->txbufs[i].mbuf); - txq->txbufs[i].mbuf = NULL; - } - } -} - -static void -nfp_net_tx_queue_release(void *tx_queue) -{ - struct nfp_net_txq *txq = tx_queue; - - if (txq) { - nfp_net_tx_queue_release_mbufs(txq); - rte_free(txq->txbufs); - rte_free(txq); - } -} - -static void -nfp_net_reset_tx_queue(struct nfp_net_txq *txq) -{ - nfp_net_tx_queue_release_mbufs(txq); - txq->wr_p = 0; - txq->rd_p = 0; - txq->tail = 0; - txq->qcp_rd_p = 0; -} +static int nfp_pf_init(struct rte_pci_device *pci_dev); +static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev); +static int nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port); +static int nfp_pci_uninit(struct rte_eth_dev *eth_dev); +static int nfp_init_phyports(struct nfp_pf_dev *pf_dev); +static int nfp_net_stop(struct rte_eth_dev *dev); +static int nfp_fw_setup(struct rte_pci_device *dev, + struct nfp_cpp *cpp, + struct nfp_eth_table *nfp_eth_table, + struct nfp_hwinfo *hwinfo); static int __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) @@ -358,7 +74,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) uint32_t new; struct timespec wait; - PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n", + PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...", hw->qcp_cfg); if (hw->qcp_cfg == NULL) @@ -369,7 +85,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) wait.tv_sec = 0; wait.tv_nsec = 1000000; - PMD_DRV_LOG(DEBUG, "Polling for update ack...\n"); + PMD_DRV_LOG(DEBUG, "Polling for update ack..."); /* Poll update field, waiting for NFP to ack the config */ for (cnt = 0; ; cnt++) { @@ -377,17 +93,17 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) if (new == 0) break; if (new & NFP_NET_CFG_UPDATE_ERR) { - PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new); + PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new); return -1; } if (cnt >= NFP_NET_POLL_TIMEOUT) { PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after" - " %dms\n", update, cnt); + " %dms", update, cnt); rte_panic("Exiting\n"); } nanosleep(&wait, 0); /* waiting for a 1ms */ } - PMD_DRV_LOG(DEBUG, "Ack DONE\n"); + PMD_DRV_LOG(DEBUG, "Ack DONE"); return 0; } @@ -400,12 +116,12 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) * Write the update word to the BAR and ping the reconfig queue. 
Then poll * until the firmware has acknowledged the update by zeroing the update word. */ -static int +int nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) { uint32_t err; - PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n", + PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x", ctrl, update); rte_spinlock_lock(&hw->reconfig_lock); @@ -426,7 +142,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) * Reconfig errors imply situations where they can be handled. * Otherwise, rte_panic is called inside __nfp_net_reconfig */ - PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n", + PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x", ctrl, update); return -EIO; } @@ -436,14 +152,12 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) * before any other function in the Ethernet API. This function can * also be re-invoked when a device is in the stopped state. */ -static int +int nfp_net_configure(struct rte_eth_dev *dev) { struct rte_eth_conf *dev_conf; struct rte_eth_rxmode *rxmode; struct rte_eth_txmode *txmode; - uint32_t new_ctrl = 0; - uint32_t update = 0; struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -456,94 +170,32 @@ nfp_net_configure(struct rte_eth_dev *dev) * called after that internal process */ - PMD_INIT_LOG(DEBUG, "Configure\n"); + PMD_INIT_LOG(DEBUG, "Configure"); dev_conf = &dev->data->dev_conf; rxmode = &dev_conf->rxmode; txmode = &dev_conf->txmode; + if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG) + rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; + /* Checking TX mode */ if (txmode->mq_mode) { - PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n"); + PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported"); return -EINVAL; } /* Checking RX mode */ - if (rxmode->mq_mode & ETH_MQ_RX_RSS) { - if (hw->cap & NFP_NET_CFG_CTRL_RSS) { - update = NFP_NET_CFG_UPDATE_RSS; - new_ctrl = NFP_NET_CFG_CTRL_RSS; - } else { - PMD_INIT_LOG(INFO, "RSS not supported\n"); - return -EINVAL; - } - } - - if (rxmode->split_hdr_size) { - PMD_INIT_LOG(INFO, "rxmode does not support split header\n"); - return -EINVAL; - } - - if (rxmode->hw_ip_checksum) { - if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) { - new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM; - } else { - PMD_INIT_LOG(INFO, "RXCSUM not supported\n"); - return -EINVAL; - } - } - - if (rxmode->hw_vlan_filter) { - PMD_INIT_LOG(INFO, "VLAN filter not supported\n"); - return -EINVAL; - } - - if (rxmode->hw_vlan_strip) { - if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) { - new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN; - } else { - PMD_INIT_LOG(INFO, "hw vlan strip not supported\n"); - return -EINVAL; - } - } - - if (rxmode->hw_vlan_extend) { - PMD_INIT_LOG(INFO, "VLAN extended not supported\n"); - return -EINVAL; - } - - /* Supporting VLAN insertion by default */ - if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) - new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN; - - if (rxmode->jumbo_frame) - /* this is handled in rte_eth_dev_configure */ - - if (rxmode->hw_strip_crc) { - PMD_INIT_LOG(INFO, "strip CRC not supported\n"); - return -EINVAL; - } - - if (rxmode->enable_scatter) { - PMD_INIT_LOG(INFO, "Scatter not supported\n"); + if (rxmode->mq_mode & ETH_MQ_RX_RSS && + !(hw->cap & NFP_NET_CFG_CTRL_RSS)) { + PMD_INIT_LOG(INFO, "RSS not supported"); return -EINVAL; } - if (!new_ctrl) - return 0; - - update |= NFP_NET_CFG_UPDATE_GEN; - - nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) - return -EIO; - - 
hw->ctrl = new_ctrl; - return 0; } -static void +void nfp_net_enable_queues(struct rte_eth_dev *dev) { struct nfp_net_hw *hw; @@ -567,7 +219,7 @@ nfp_net_enable_queues(struct rte_eth_dev *dev) nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues); } -static void +void nfp_net_disable_queues(struct rte_eth_dev *dev) { struct nfp_net_hw *hw; @@ -592,117 +244,338 @@ nfp_net_disable_queues(struct rte_eth_dev *dev) hw->ctrl = new_ctrl; } -static int -nfp_net_rx_freelist_setup(struct rte_eth_dev *dev) -{ - int i; - - for (i = 0; i < dev->data->nb_rx_queues; i++) { - if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0) - return -1; - } - return 0; -} - -static void +void nfp_net_params_setup(struct nfp_net_hw *hw) { - uint32_t *mac_address; - nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu); nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz); - - /* A MAC address is 8 bytes long */ - mac_address = (uint32_t *)(hw->mac_addr); - - nn_cfg_writel(hw, NFP_NET_CFG_MACADDR, - rte_cpu_to_be_32(*mac_address)); - nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4, - rte_cpu_to_be_32(*(mac_address + 4))); } -static void +void nfp_net_cfg_queue_setup(struct nfp_net_hw *hw) { hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; } +#define ETH_ADDR_LEN 6 + +void +nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) + dst[i] = src[i]; +} + static int -nfp_net_start(struct rte_eth_dev *dev) +nfp_net_pf_read_mac(struct nfp_pf_dev *pf_dev, int port) { - uint32_t new_ctrl, update = 0; - struct nfp_net_hw *hw; - int ret; + struct nfp_eth_table *nfp_eth_table; + struct nfp_net_hw *hw = NULL; - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* Grab a pointer to the correct physical port */ + hw = pf_dev->ports[port]; - PMD_INIT_LOG(DEBUG, "Start\n"); + nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp); - /* Disabling queues just in case... */ - nfp_net_disable_queues(dev); + nfp_eth_copy_mac((uint8_t *)&hw->mac_addr, + (uint8_t *)&nfp_eth_table->ports[port].mac_addr); - /* Writing configuration parameters in the device */ - nfp_net_params_setup(hw); + free(nfp_eth_table); + return 0; +} - /* Enabling the required queues in the device */ - nfp_net_enable_queues(dev); +void +nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac) +{ + uint32_t mac0 = *(uint32_t *)mac; + uint16_t mac1; - /* Enable device */ - new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_UPDATE_MSIX; - update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; + nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR); - if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) - new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; + mac += 4; + mac1 = *(uint16_t *)mac; + nn_writew(rte_cpu_to_be_16(mac1), + hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6); +} - nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) - return -EIO; +int +nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + struct nfp_net_hw *hw; + uint32_t update, ctrl; - /* - * Allocating rte mbuffs for configured rx queues. 
- * This requires queues being enabled before - */ - if (nfp_net_rx_freelist_setup(dev) < 0) { - ret = -ENOMEM; - goto error; + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) { + PMD_INIT_LOG(INFO, "MAC address unable to change when" + " port enabled"); + return -EBUSY; } - hw->ctrl = new_ctrl; - + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) + return -EBUSY; + + /* Writing new MAC to the specific port BAR address */ + nfp_net_write_mac(hw, (uint8_t *)mac_addr); + + /* Signal the NIC about the change */ + update = NFP_NET_CFG_UPDATE_MACADDR; + ctrl = hw->ctrl; + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) + ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR; + if (nfp_net_reconfig(hw, ctrl, update) < 0) { + PMD_INIT_LOG(INFO, "MAC address update failed"); + return -EIO; + } return 0; - -error: - /* - * An error returned by this function should mean the app - * exiting and then the system releasing all the memory - * allocated even memory coming from hugepages. - * - * The device could be enabled at this point with some queues - * ready for getting packets. This is true if the call to - * nfp_net_rx_freelist_setup() succeeds for some queues but - * fails for subsequent queues. - * - * This should make the app exiting but better if we tell the - * device first. - */ - nfp_net_disable_queues(dev); - - return ret; } -/* Stop device: disable rx and tx functions to allow for reconfiguring. */ -static void -nfp_net_stop(struct rte_eth_dev *dev) +int +nfp_configure_rx_interrupt(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) { + struct nfp_net_hw *hw; int i; - PMD_INIT_LOG(DEBUG, "Stop\n"); + if (!intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + } - nfp_net_disable_queues(dev); + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* Clear queues */ - for (i = 0; i < dev->data->nb_tx_queues; i++) { - nfp_net_reset_tx_queue( + if (intr_handle->type == RTE_INTR_HANDLE_UIO) { + PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO"); + /* UIO just supports one queue and no LSC*/ + nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0); + intr_handle->intr_vec[0] = 0; + } else { + PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO"); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + /* + * The first msix vector is reserved for non + * efd interrupts + */ + nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1); + intr_handle->intr_vec[i] = i + 1; + PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i, + intr_handle->intr_vec[i]); + } + } + + /* Avoiding TX interrupts */ + hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF; + return 0; +} + +uint32_t +nfp_check_offloads(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + struct rte_eth_txmode *txmode; + uint32_t ctrl = 0; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_conf = &dev->data->dev_conf; + rxmode = &dev_conf->rxmode; + txmode = &dev_conf->txmode; + + if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) { + if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) + ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + } + + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) + 
ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + } + + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + hw->mtu = rxmode->max_rx_pkt_len; + + if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) + ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + + /* L2 broadcast */ + if (hw->cap & NFP_NET_CFG_CTRL_L2BC) + ctrl |= NFP_NET_CFG_CTRL_L2BC; + + /* L2 multicast */ + if (hw->cap & NFP_NET_CFG_CTRL_L2MC) + ctrl |= NFP_NET_CFG_CTRL_L2MC; + + /* TX checksum offload */ + if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM || + txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM || + txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) + ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + + /* LSO offload */ + if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) { + if (hw->cap & NFP_NET_CFG_CTRL_LSO) + ctrl |= NFP_NET_CFG_CTRL_LSO; + else + ctrl |= NFP_NET_CFG_CTRL_LSO2; + } + + /* RX gather */ + if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + ctrl |= NFP_NET_CFG_CTRL_GATHER; + + return ctrl; +} + +static int +nfp_net_start(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t new_ctrl, update = 0; + struct nfp_net_hw *hw; + struct nfp_pf_dev *pf_dev; + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + uint32_t intr_vector; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + PMD_INIT_LOG(DEBUG, "Start"); + + /* Disabling queues just in case... */ + nfp_net_disable_queues(dev); + + /* Enabling the required queues in the device */ + nfp_net_enable_queues(dev); + + /* check and configure queue intr-vector mapping */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + if (pf_dev->multiport) { + PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " + "with NFP multiport PF"); + return -EINVAL; + } + if (intr_handle->type == RTE_INTR_HANDLE_UIO) { + /* + * Better not to share LSC with RX interrupts. + * Unregistering LSC interrupt handler + */ + rte_intr_callback_unregister(&pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, (void *)dev); + + if (dev->data->nb_rx_queues > 1) { + PMD_INIT_LOG(ERR, "PMD rx interrupt only " + "supports 1 queue with UIO"); + return -EIO; + } + } + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + + nfp_configure_rx_interrupt(dev, intr_handle); + update = NFP_NET_CFG_UPDATE_MSIX; + } + + rte_intr_enable(intr_handle); + + new_ctrl = nfp_check_offloads(dev); + + /* Writing configuration parameters in the device */ + nfp_net_params_setup(hw); + + dev_conf = &dev->data->dev_conf; + rxmode = &dev_conf->rxmode; + + if (rxmode->mq_mode & ETH_MQ_RX_RSS) { + nfp_net_rss_config_default(dev); + update |= NFP_NET_CFG_UPDATE_RSS; + new_ctrl |= NFP_NET_CFG_CTRL_RSS; + } + + /* Enable device */ + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; + + update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; + + if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; + + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return -EIO; + + /* + * Allocating rte mbufs for configured rx queues. 
+ * This requires queues being enabled before + */ + if (nfp_net_rx_freelist_setup(dev) < 0) { + ret = -ENOMEM; + goto error; + } + + if (hw->is_phyport) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port up */ + nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); + else + nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 1); + } + + hw->ctrl = new_ctrl; + + return 0; + +error: + /* + * An error returned by this function should mean the app + * exiting and then the system releasing all the memory + * allocated even memory coming from hugepages. + * + * The device could be enabled at this point with some queues + * ready for getting packets. This is true if the call to + * nfp_net_rx_freelist_setup() succeeds for some queues but + * fails for subsequent queues. + * + * This should make the app exiting but better if we tell the + * device first. + */ + nfp_net_disable_queues(dev); + + return ret; +} + +/* Stop device: disable rx and tx functions to allow for reconfiguring. */ +static int +nfp_net_stop(struct rte_eth_dev *dev) +{ + int i; + struct nfp_net_hw *hw; + + PMD_INIT_LOG(DEBUG, "Stop"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + nfp_net_disable_queues(dev); + + /* Clear queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + nfp_net_reset_tx_queue( (struct nfp_net_txq *)dev->data->tx_queues[i]); } @@ -710,52 +583,156 @@ nfp_net_stop(struct rte_eth_dev *dev) nfp_net_reset_rx_queue( (struct nfp_net_rxq *)dev->data->rx_queues[i]); } + + if (hw->is_phyport) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ + nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); + else + nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 0); + } + + return 0; +} + +/* Set the link up. */ +static int +nfp_net_set_link_up(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Set link up"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!hw->is_phyport) + return -ENOTSUP; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ + return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); + else + return nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 1); +} + +/* Set the link down. */ +static int +nfp_net_set_link_down(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Set link down"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!hw->is_phyport) + return -ENOTSUP; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ + return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); + else + return nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 0); } /* Reset and stop device. The device can not be restarted. */ -static void +static int nfp_net_close(struct rte_eth_dev *dev) { struct nfp_net_hw *hw; + struct rte_pci_device *pci_dev; + int i; - PMD_INIT_LOG(DEBUG, "Close\n"); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + PMD_INIT_LOG(DEBUG, "Close"); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); /* * We assume that the DPDK application is stopping all the * threads/queues before calling the device close function. 
*/ - nfp_net_stop(dev); + nfp_net_disable_queues(dev); + + /* Clear queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + nfp_net_reset_tx_queue( + (struct nfp_net_txq *)dev->data->tx_queues[i]); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + nfp_net_reset_rx_queue( + (struct nfp_net_rxq *)dev->data->rx_queues[i]); + } + + /* Only free PF resources after all physical ports have been closed */ + if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC || + pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) { + struct nfp_pf_dev *pf_dev; + pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + /* Mark this port as unused and free device priv resources*/ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); + pf_dev->ports[hw->idx] = NULL; + rte_eth_dev_release_port(dev); + + for (i = 0; i < pf_dev->total_phyports; i++) { + /* Check to see if ports are still in use */ + if (pf_dev->ports[i]) + return 0; + } + + /* Now it is safe to free all PF resources */ + PMD_INIT_LOG(INFO, "Freeing PF resources"); + nfp_cpp_area_free(pf_dev->ctrl_area); + nfp_cpp_area_free(pf_dev->hwqueues_area); + free(pf_dev->hwinfo); + free(pf_dev->sym_tbl); + nfp_cpp_free(pf_dev->cpp); + rte_free(pf_dev); + } + + rte_intr_disable(&pci_dev->intr_handle); - rte_intr_disable(&dev->pci_dev->intr_handle); - nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); + /* unregister callback func from eal lib */ + rte_intr_callback_unregister(&pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, + (void *)dev); /* * The ixgbe PMD driver disables the pcie master on the * device. The i40e does not... */ + + return 0; } -static void +int nfp_net_promisc_enable(struct rte_eth_dev *dev) { uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; + int ret; - PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n"); + PMD_DRV_LOG(DEBUG, "Promiscuous mode enable"); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) { - PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n"); - return; + PMD_INIT_LOG(INFO, "Promiscuous mode not supported"); + return -ENOTSUP; } if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) { - PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n"); - return; + PMD_DRV_LOG(INFO, "Promiscuous mode already enabled"); + return 0; } new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC; @@ -765,23 +742,27 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) * DPDK sets promiscuous mode on just after this call assuming * it can not fail ... */ - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) - return; + ret = nfp_net_reconfig(hw, new_ctrl, update); + if (ret < 0) + return ret; hw->ctrl = new_ctrl; + + return 0; } -static void +int nfp_net_promisc_disable(struct rte_eth_dev *dev) { uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; + int ret; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) { - PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n"); - return; + PMD_DRV_LOG(INFO, "Promiscuous mode already disabled"); + return 0; } new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC; @@ -791,10 +772,13 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev) * DPDK sets promiscuous mode off just before this call * assuming it can not fail ... 
*/ - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) - return; + ret = nfp_net_reconfig(hw, new_ctrl, update); + if (ret < 0) + return ret; hw->ctrl = new_ctrl; + + return 0; } /* @@ -803,20 +787,29 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev) * Wait to complete is needed as it can take up to 9 seconds to get the Link * status. */ -static int +int nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { struct nfp_net_hw *hw; - struct rte_eth_link link, old; + struct rte_eth_link link; uint32_t nn_link_status; + int ret; + + static const uint32_t ls_to_ethtool[] = { + [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE, + [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE, + [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G, + [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G, + [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G, + [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G, + [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G, + [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G, + }; - PMD_DRV_LOG(DEBUG, "Link update\n"); + PMD_DRV_LOG(DEBUG, "Link update"); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - memset(&old, 0, sizeof(old)); - nfp_net_dev_atomic_read_link_status(dev, &old); - nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS); memset(&link, 0, sizeof(struct rte_eth_link)); @@ -825,22 +818,26 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) link.link_status = ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; - /* Other cards can limit the tx and rx rate per VF */ - link.link_speed = ETH_SPEED_NUM_40G; - if (old.link_status != link.link_status) { - nfp_net_dev_atomic_write_link_status(dev, &link); + nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) & + NFP_NET_CFG_STS_LINK_RATE_MASK; + + if (nn_link_status >= RTE_DIM(ls_to_ethtool)) + link.link_speed = ETH_SPEED_NUM_NONE; + else + link.link_speed = ls_to_ethtool[nn_link_status]; + + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == 0) { if (link.link_status) - PMD_DRV_LOG(INFO, "NIC Link is Up\n"); + PMD_DRV_LOG(INFO, "NIC Link is Up"); else - PMD_DRV_LOG(INFO, "NIC Link is Down\n"); - return 0; + PMD_DRV_LOG(INFO, "NIC Link is Down"); } - - return -1; + return ret; } -static void +int nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { int i; @@ -851,6 +848,8 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */ + memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats)); + /* reading per RX ring stats */ for (i = 0; i < dev->data->nb_rx_queues; i++) { if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) @@ -926,11 +925,14 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) nfp_dev_stats.imissed -= hw->eth_stats_base.imissed; - if (stats) + if (stats) { memcpy(stats, &nfp_dev_stats, sizeof(*stats)); + return 0; + } + return -EINVAL; } -static void +int nfp_net_stats_reset(struct rte_eth_dev *dev) { int i; @@ -991,20 +993,21 @@ nfp_net_stats_reset(struct rte_eth_dev *dev) hw->eth_stats_base.imissed = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); + + return 0; } -static void +int nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->driver_name = dev->driver->pci_drv.name; dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; - 
dev_info->min_rx_bufsize = ETHER_MIN_MTU; - dev_info->max_rx_pktlen = hw->mtu; + dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU; + dev_info->max_rx_pktlen = hw->max_mtu; /* Next should change when PF support is implemented */ dev_info->max_mac_addrs = 1; @@ -1021,8 +1024,14 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM) dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + + if (hw->cap & NFP_NET_CFG_CTRL_GATHER) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -1042,17 +1051,47 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, }; - dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ; - dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ; + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = NFP_NET_MAX_RX_DESC, + .nb_min = NFP_NET_MIN_RX_DESC, + .nb_align = NFP_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = NFP_NET_MAX_TX_DESC, + .nb_min = NFP_NET_MIN_TX_DESC, + .nb_align = NFP_ALIGN_RING_DESC, + .nb_seg_max = NFP_TX_MAX_SEG, + .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG, + }; + + /* All NFP devices support jumbo frames */ + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + if (hw->cap & NFP_NET_CFG_CTRL_RSS) { + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH; + + dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_IPV6 | + ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_NONFRAG_IPV6_UDP; + + dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ; + dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ; + } + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G; - dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G; + return 0; } -static const uint32_t * +const uint32_t * nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) { static const uint32_t ptypes[] = { @@ -1069,69 +1108,64 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } -static uint32_t -nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx) +int +nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct nfp_net_rxq *rxq; - struct nfp_net_rx_desc *rxds; - uint32_t idx; - uint32_t count; + struct rte_pci_device *pci_dev; + struct nfp_net_hw *hw; + int base = 0; - rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx]; + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); - if (rxq == NULL) { - PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx); - return 0; - } + if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO) + base = 1; - idx = rxq->rd_p % rxq->rx_count; - rxds = &rxq->rxds[idx]; + /* Make sure all updates are written before un-masking */ + rte_wmb(); + nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), + NFP_NET_CFG_ICR_UNMASKED); + return 0; +} - count = 0; +int +nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev; + 
struct nfp_net_hw *hw; + int base = 0; - /* - * Other PMDs are just checking the DD bit in intervals of 4 - * descriptors and counting all four if the first has the DD - * bit on. Of course, this is not accurate but can be good for - * perfomance. But ideally that should be done in descriptors - * chunks belonging to the same cache line - */ - - while (count < rxq->rx_count) { - rxds = &rxq->rxds[idx]; - if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) - break; - - count++; - idx++; + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); - /* Wrapping? */ - if ((idx) == rxq->rx_count) - idx = 0; - } + if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO) + base = 1; - return count; + /* Make sure all updates are written before un-masking */ + rte_wmb(); + nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1); + return 0; } static void nfp_net_dev_link_status_print(struct rte_eth_dev *dev) { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - memset(&link, 0, sizeof(link)); - nfp_net_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); if (link.link_status) - RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n", - (int)(dev->data->port_id), (unsigned)link.link_speed, - link.link_duplex == ETH_LINK_FULL_DUPLEX - ? "full-duplex" : "half-duplex"); + PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX + ? "full-duplex" : "half-duplex"); else - RTE_LOG(INFO, PMD, " Port %d: Link Down\n", - (int)(dev->data->port_id)); + PMD_DRV_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); - RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n", - dev->pci_dev->addr.domain, dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, dev->pci_dev->addr.function); + PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); } /* Interrupt configuration and handling */ @@ -1146,13 +1180,15 @@ static void nfp_net_irq_unmask(struct rte_eth_dev *dev) { struct nfp_net_hw *hw; + struct rte_pci_device *pci_dev; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { /* If MSI-X auto-masking is used, clear the entry */ rte_wmb(); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_ack(&pci_dev->intr_handle); } else { /* Make sure all updates are written before un-masking */ rte_wmb(); @@ -1161,19 +1197,41 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev) } } +/* + * Interrupt handler which shall be registered for alarm callback for delayed + * handling specific interrupt to wait for the stable nic state. As the NIC + * interrupt state is not stable for nfp after link is just down, it needs + * to wait 4 seconds to get the stable status. + * + * @param handle Pointer to interrupt handle. 
+ * @param param The address of parameter (struct rte_eth_dev *) + * + * @return void + */ static void -nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, - void *param) +nfp_net_dev_interrupt_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + nfp_net_link_update(dev, 0); + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + + nfp_net_dev_link_status_print(dev); + + /* Unmasking */ + nfp_net_irq_unmask(dev); +} + +void +nfp_net_dev_interrupt_handler(void *param) { int64_t timeout; struct rte_eth_link link; struct rte_eth_dev *dev = (struct rte_eth_dev *)param; - PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n"); + PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!"); - /* get the link status */ - memset(&link, 0, sizeof(link)); - nfp_net_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); nfp_net_link_update(dev, 0); @@ -1190,38 +1248,13 @@ nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, if (rte_eal_alarm_set(timeout * 1000, nfp_net_dev_interrupt_delayed_handler, (void *)dev) < 0) { - RTE_LOG(ERR, PMD, "Error setting alarm"); + PMD_INIT_LOG(ERR, "Error setting alarm"); /* Unmasking */ nfp_net_irq_unmask(dev); } } -/* - * Interrupt handler which shall be registered for alarm callback for delayed - * handling specific interrupt to wait for the stable nic state. As the NIC - * interrupt state is not stable for nfp after link is just down, it needs - * to wait 4 seconds to get the stable status. - * - * @param handle Pointer to interrupt handle. - * @param param The address of parameter (struct rte_eth_dev *) - * - * @return void - */ -static void -nfp_net_dev_interrupt_delayed_handler(void *param) -{ - struct rte_eth_dev *dev = (struct rte_eth_dev *)param; - - nfp_net_link_update(dev, 0); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); - - nfp_net_dev_link_status_print(dev); - - /* Unmasking */ - nfp_net_irq_unmask(dev); -} - -static int +int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct nfp_net_hw *hw; @@ -1229,14 +1262,21 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* check that mtu is within the allowed range */ - if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu)) + if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu) return -EINVAL; + /* mtu setting is forbidden if port is started */ + if (dev->data->dev_started) { + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", + dev->data->port_id); + return -EBUSY; + } + /* switch to jumbo mode if needed */ - if ((uint32_t)mtu > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + if ((uint32_t)mtu > RTE_ETHER_MTU) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; /* update max frame size */ dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu; @@ -1249,798 +1289,16 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return 0; } -static int -nfp_net_rx_queue_setup(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t nb_desc, - unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mp) -{ - const struct rte_memzone *tz; - struct nfp_net_rxq *rxq; - struct nfp_net_hw *hw; - - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - PMD_INIT_FUNC_TRACE(); - - /* Validating number of 
descriptors */ - if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 || - (nb_desc > NFP_NET_MAX_RX_DESC) || - (nb_desc < NFP_NET_MIN_RX_DESC)) { - RTE_LOG(ERR, PMD, "Wrong nb_desc value\n"); - return -EINVAL; - } - - /* - * Free memory prior to re-allocation if needed. This is the case after - * calling nfp_net_stop - */ - if (dev->data->rx_queues[queue_idx]) { - nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]); - dev->data->rx_queues[queue_idx] = NULL; - } - - /* Allocating rx queue data structure */ - rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq), - RTE_CACHE_LINE_SIZE, socket_id); - if (rxq == NULL) - return -ENOMEM; - - /* Hw queues mapping based on firmware confifguration */ - rxq->qidx = queue_idx; - rxq->fl_qcidx = queue_idx * hw->stride_rx; - rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1); - rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx); - rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx); - - /* - * Tracking mbuf size for detecting a potential mbuf overflow due to - * RX offset - */ - rxq->mem_pool = mp; - rxq->mbuf_size = rxq->mem_pool->elt_size; - rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM); - hw->flbufsz = rxq->mbuf_size; - - rxq->rx_count = nb_desc; - rxq->port_id = dev->data->port_id; - rxq->rx_free_thresh = rx_conf->rx_free_thresh; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 - : ETHER_CRC_LEN); - rxq->drop_en = rx_conf->rx_drop_en; - - /* - * Allocate RX ring hardware descriptors. A memzone large enough to - * handle the maximum ring size is allocated in order to allow for - * resizing in later calls to the queue setup function. - */ - tz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, - sizeof(struct nfp_net_rx_desc) * - NFP_NET_MAX_RX_DESC, socket_id); - - if (tz == NULL) { - RTE_LOG(ERR, PMD, "Error allocatig rx dma\n"); - nfp_net_rx_queue_release(rxq); - return -ENOMEM; - } - - /* Saving physical and virtual addresses for the RX ring */ - rxq->dma = (uint64_t)tz->phys_addr; - rxq->rxds = (struct nfp_net_rx_desc *)tz->addr; - - /* mbuf pointers array for referencing mbufs linked to RX descriptors */ - rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs", - sizeof(*rxq->rxbufs) * nb_desc, - RTE_CACHE_LINE_SIZE, socket_id); - if (rxq->rxbufs == NULL) { - nfp_net_rx_queue_release(rxq); - return -ENOMEM; - } - - PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", - rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma); - - nfp_net_reset_rx_queue(rxq); - - dev->data->rx_queues[queue_idx] = rxq; - rxq->hw = hw; - - /* - * Telling the HW about the physical address of the RX ring and number - * of descriptors in log2 format - */ - nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma); - nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc)); - - return 0; -} - -static int -nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) -{ - struct nfp_net_rx_buff *rxe = rxq->rxbufs; - uint64_t dma_addr; - unsigned i; - - PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n", - rxq->rx_count); - - for (i = 0; i < rxq->rx_count; i++) { - struct nfp_net_rx_desc *rxd; - struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool); - - if (mbuf == NULL) { - RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n", - (unsigned)rxq->qidx); - return -ENOMEM; - } - - dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf)); - - rxd = &rxq->rxds[i]; - rxd->fld.dd = 0; - rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; - rxd->fld.dma_addr_lo = 
dma_addr & 0xffffffff; - rxe[i].mbuf = mbuf; - PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr); - - rxq->wr_p++; - } - - /* Make sure all writes are flushed before telling the hardware */ - rte_wmb(); - - /* Not advertising the whole ring as the firmware gets confused if so */ - PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n", - rxq->rx_count - 1); - - nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1); - - return 0; -} - -static int -nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, - uint16_t nb_desc, unsigned int socket_id, - const struct rte_eth_txconf *tx_conf) -{ - const struct rte_memzone *tz; - struct nfp_net_txq *txq; - uint16_t tx_free_thresh; - struct nfp_net_hw *hw; - - hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - PMD_INIT_FUNC_TRACE(); - - /* Validating number of descriptors */ - if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 || - (nb_desc > NFP_NET_MAX_TX_DESC) || - (nb_desc < NFP_NET_MIN_TX_DESC)) { - RTE_LOG(ERR, PMD, "Wrong nb_desc value\n"); - return -EINVAL; - } - - tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? - tx_conf->tx_free_thresh : - DEFAULT_TX_FREE_THRESH); - - if (tx_free_thresh > (nb_desc)) { - RTE_LOG(ERR, PMD, - "tx_free_thresh must be less than the number of TX " - "descriptors. (tx_free_thresh=%u port=%d " - "queue=%d)\n", (unsigned int)tx_free_thresh, - (int)dev->data->port_id, (int)queue_idx); - return -(EINVAL); - } - - /* - * Free memory prior to re-allocation if needed. This is the case after - * calling nfp_net_stop - */ - if (dev->data->tx_queues[queue_idx]) { - PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n", - queue_idx); - nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]); - dev->data->tx_queues[queue_idx] = NULL; - } - - /* Allocating tx queue data structure */ - txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq), - RTE_CACHE_LINE_SIZE, socket_id); - if (txq == NULL) { - RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); - return -ENOMEM; - } - - /* - * Allocate TX ring hardware descriptors. A memzone large enough to - * handle the maximum ring size is allocated in order to allow for - * resizing in later calls to the queue setup function. 
- */ - tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, - sizeof(struct nfp_net_tx_desc) * - NFP_NET_MAX_TX_DESC, socket_id); - if (tz == NULL) { - RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); - nfp_net_tx_queue_release(txq); - return -ENOMEM; - } - - txq->tx_count = nb_desc; - txq->tail = 0; - txq->tx_free_thresh = tx_free_thresh; - txq->tx_pthresh = tx_conf->tx_thresh.pthresh; - txq->tx_hthresh = tx_conf->tx_thresh.hthresh; - txq->tx_wthresh = tx_conf->tx_thresh.wthresh; - - /* queue mapping based on firmware configuration */ - txq->qidx = queue_idx; - txq->tx_qcidx = queue_idx * hw->stride_tx; - txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx); - - txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; - - /* Saving physical and virtual addresses for the TX ring */ - txq->dma = (uint64_t)tz->phys_addr; - txq->txds = (struct nfp_net_tx_desc *)tz->addr; - - /* mbuf pointers array for referencing mbufs linked to TX descriptors */ - txq->txbufs = rte_zmalloc_socket("txq->txbufs", - sizeof(*txq->txbufs) * nb_desc, - RTE_CACHE_LINE_SIZE, socket_id); - if (txq->txbufs == NULL) { - nfp_net_tx_queue_release(txq); - return -ENOMEM; - } - PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", - txq->txbufs, txq->txds, (unsigned long int)txq->dma); - - nfp_net_reset_tx_queue(txq); - - dev->data->tx_queues[queue_idx] = txq; - txq->hw = hw; - - /* - * Telling the HW about the physical address of the TX ring and number - * of descriptors in log2 format - */ - nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma); - nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc)); - - return 0; -} - -/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */ -static inline void -nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, - struct rte_mbuf *mb) -{ - uint64_t ol_flags; - struct nfp_net_hw *hw = txq->hw; - - if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) - return; - - ol_flags = mb->ol_flags; - - /* IPv6 does not need checksum */ - if (ol_flags & PKT_TX_IP_CKSUM) - txd->flags |= PCIE_DESC_TX_IP4_CSUM; - - switch (ol_flags & PKT_TX_L4_MASK) { - case PKT_TX_UDP_CKSUM: - txd->flags |= PCIE_DESC_TX_UDP_CSUM; - break; - case PKT_TX_TCP_CKSUM: - txd->flags |= PCIE_DESC_TX_TCP_CSUM; - break; - } - - if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)) - txd->flags |= PCIE_DESC_TX_CSUM; -} - -/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */ -static inline void -nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, - struct rte_mbuf *mb) -{ - struct nfp_net_hw *hw = rxq->hw; - - if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM)) - return; - - /* If IPv4 and IP checksum error, fail */ - if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) && - !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)) - mb->ol_flags |= PKT_RX_IP_CKSUM_BAD; - - /* If neither UDP nor TCP return */ - if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) && - !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM)) - return; - - if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) && - !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK)) - mb->ol_flags |= PKT_RX_L4_CKSUM_BAD; - - if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) && - !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK)) - mb->ol_flags |= PKT_RX_L4_CKSUM_BAD; -} - -#define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4) -#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8) - -/* - * nfp_net_set_hash - Set mbuf hash data - * - * The RSS hash and hash-type are pre-pended to the 
packet data. - * Extract and decode it and set the mbuf fields. - */ -static inline void -nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, - struct rte_mbuf *mbuf) -{ - uint32_t hash; - uint32_t hash_type; - struct nfp_net_hw *hw = rxq->hw; - - if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) - return; - - if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) - return; - - hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET); - hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET); - - /* - * hash type is sharing the same word with input port info - * 31-8: input port - * 7:0: hash type - */ - hash_type &= 0xff; - mbuf->hash.rss = hash; - mbuf->ol_flags |= PKT_RX_RSS_HASH; - - switch (hash_type) { - case NFP_NET_RSS_IPV4: - mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4; - break; - case NFP_NET_RSS_IPV6: - mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6; - break; - case NFP_NET_RSS_IPV6_EX: - mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT; - break; - default: - mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK; - } -} - -/* nfp_net_check_port - Set mbuf in_port field */ -static void -nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf) -{ - uint32_t port; - - if (!(rxd->rxd.flags & PCIE_DESC_RX_INGRESS_PORT)) { - mbuf->port = 0; - return; - } - - port = rte_be_to_cpu_32(*(uint32_t *)((uint8_t *)mbuf->buf_addr + - mbuf->data_off - 8)); - - /* - * hash type is sharing the same word with input port info - * 31-8: input port - * 7:0: hash type - */ - port = (uint8_t)(port >> 8); - mbuf->port = port; -} - -static inline void -nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq) -{ - rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; -} - -#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK) - -/* - * RX path design: - * - * There are some decissions to take: - * 1) How to check DD RX descriptors bit - * 2) How and when to allocate new mbufs - * - * Current implementation checks just one single DD bit each loop. As each - * descriptor is 8 bytes, it is likely a good idea to check descriptors in - * a single cache line instead. Tests with this change have not shown any - * performance improvement but it requires further investigation. For example, - * depending on which descriptor is next, the number of descriptors could be - * less than 8 for just checking those in the same cache line. This implies - * extra work which could be counterproductive by itself. Indeed, last firmware - * changes are just doing this: writing several descriptors with the DD bit - * for saving PCIe bandwidth and DMA operations from the NFP. - * - * Mbuf allocation is done when a new packet is received. Then the descriptor - * is automatically linked with the new mbuf and the old one is given to the - * user. The main drawback with this design is mbuf allocation is heavier than - * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the - * cache point of view it does not seem allocating the mbuf early on as we are - * doing now have any benefit at all. Again, tests with this change have not - * shown any improvement. 
Also, rte_mempool_get_bulk returns all or nothing - * so looking at the implications of this type of allocation should be studied - * deeply - */ - -static uint16_t -nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) -{ - struct nfp_net_rxq *rxq; - struct nfp_net_rx_desc *rxds; - struct nfp_net_rx_buff *rxb; - struct nfp_net_hw *hw; - struct rte_mbuf *mb; - struct rte_mbuf *new_mb; - int idx; - uint16_t nb_hold; - uint64_t dma_addr; - int avail; - - rxq = rx_queue; - if (unlikely(rxq == NULL)) { - /* - * DPDK just checks the queue is lower than max queues - * enabled. But the queue needs to be configured - */ - RTE_LOG(ERR, PMD, "RX Bad queue\n"); - return -EINVAL; - } - - hw = rxq->hw; - avail = 0; - nb_hold = 0; - - while (avail < nb_pkts) { - idx = rxq->rd_p % rxq->rx_count; - - rxb = &rxq->rxbufs[idx]; - if (unlikely(rxb == NULL)) { - RTE_LOG(ERR, PMD, "rxb does not exist!\n"); - break; - } - - /* - * Memory barrier to ensure that we won't do other - * reads before the DD bit. - */ - rte_rmb(); - - rxds = &rxq->rxds[idx]; - if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) - break; - - /* - * We got a packet. Let's alloc a new mbuff for refilling the - * free descriptor ring as soon as possible - */ - new_mb = rte_pktmbuf_alloc(rxq->mem_pool); - if (unlikely(new_mb == NULL)) { - RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u " - "queue_id=%u\n", (unsigned)rxq->port_id, - (unsigned)rxq->qidx); - nfp_net_mbuf_alloc_failed(rxq); - break; - } - - nb_hold++; - - /* - * Grab the mbuff and refill the descriptor with the - * previously allocated mbuff - */ - mb = rxb->mbuf; - rxb->mbuf = new_mb; - - PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n", - rxds->rxd.data_len, rxq->mbuf_size); - - /* Size of this segment */ - mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds); - /* Size of the whole packet. We just support 1 segment */ - mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds); - - if (unlikely((mb->data_len + hw->rx_offset) > - rxq->mbuf_size)) { - /* - * This should not happen and the user has the - * responsibility of avoiding it. 
But we have - * to give some info about the error - */ - RTE_LOG(ERR, PMD, - "mbuf overflow likely due to the RX offset.\n" - "\t\tYour mbuf size should have extra space for" - " RX offset=%u bytes.\n" - "\t\tCurrently you just have %u bytes available" - " but the received packet is %u bytes long", - hw->rx_offset, - rxq->mbuf_size - hw->rx_offset, - mb->data_len); - return -EINVAL; - } - - /* Filling the received mbuff with packet info */ - if (hw->rx_offset) - mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset; - else - mb->data_off = RTE_PKTMBUF_HEADROOM + - NFP_DESC_META_LEN(rxds); - - /* No scatter mode supported */ - mb->nb_segs = 1; - mb->next = NULL; - - /* Checking the RSS flag */ - nfp_net_set_hash(rxq, rxds, mb); - - /* Checking the checksum flag */ - nfp_net_rx_cksum(rxq, rxds, mb); - - /* Checking the port flag */ - nfp_net_check_port(rxds, mb); - - if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) && - (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) { - mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan); - mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; - } - - /* Adding the mbuff to the mbuff array passed by the app */ - rx_pkts[avail++] = mb; - - /* Now resetting and updating the descriptor */ - rxds->vals[0] = 0; - rxds->vals[1] = 0; - dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb)); - rxds->fld.dd = 0; - rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; - rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; - - rxq->rd_p++; - } - - if (nb_hold == 0) - return nb_hold; - - PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n", - (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold); - - nb_hold += rxq->nb_rx_hold; - - /* - * FL descriptors needs to be written before incrementing the - * FL queue WR pointer - */ - rte_wmb(); - if (nb_hold > rxq->rx_free_thresh) { - PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n", - (unsigned)rxq->port_id, (unsigned)rxq->qidx, - (unsigned)nb_hold, (unsigned)avail); - nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); - nb_hold = 0; - } - rxq->nb_rx_hold = nb_hold; - - return avail; -} - -/* - * nfp_net_tx_free_bufs - Check for descriptors with a complete - * status - * @txq: TX queue to work with - * Returns number of descriptors freed - */ int -nfp_net_tx_free_bufs(struct nfp_net_txq *txq) -{ - uint32_t qcp_rd_p; - int todo; - - PMD_TX_LOG(DEBUG, "queue %u. 
Check for descriptor with a complete" - " status\n", txq->qidx); - - /* Work out how many packets have been sent */ - qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR); - - if (qcp_rd_p == txq->qcp_rd_p) { - PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending " - "packets (%u, %u)\n", txq->qidx, - qcp_rd_p, txq->qcp_rd_p); - return 0; - } - - if (qcp_rd_p > txq->qcp_rd_p) - todo = qcp_rd_p - txq->qcp_rd_p; - else - todo = qcp_rd_p + txq->tx_count - txq->qcp_rd_p; - - PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->qcp_rd_p: %u, qcp->rd_p: %u\n", - qcp_rd_p, txq->qcp_rd_p, txq->rd_p); - - if (todo == 0) - return todo; - - txq->qcp_rd_p += todo; - txq->qcp_rd_p %= txq->tx_count; - txq->rd_p += todo; - - return todo; -} - -/* Leaving always free descriptors for avoiding wrapping confusion */ -#define NFP_FREE_TX_DESC(t) (t->tx_count - (t->wr_p - t->rd_p) - 8) - -/* - * nfp_net_txq_full - Check if the TX queue free descriptors - * is below tx_free_threshold - * - * @txq: TX queue to check - * - * This function uses the host copy* of read/write pointers - */ -static inline -int nfp_net_txq_full(struct nfp_net_txq *txq) -{ - return NFP_FREE_TX_DESC(txq) < txq->tx_free_thresh; -} - -static uint16_t -nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) -{ - struct nfp_net_txq *txq; - struct nfp_net_hw *hw; - struct nfp_net_tx_desc *txds; - struct rte_mbuf *pkt; - uint64_t dma_addr; - int pkt_size, dma_size; - uint16_t free_descs, issued_descs; - struct rte_mbuf **lmbuf; - int i; - - txq = tx_queue; - hw = txq->hw; - txds = &txq->txds[txq->tail]; - - PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n", - txq->qidx, txq->tail, nb_pkts); - - if ((NFP_FREE_TX_DESC(txq) < nb_pkts) || (nfp_net_txq_full(txq))) - nfp_net_tx_free_bufs(txq); - - free_descs = (uint16_t)NFP_FREE_TX_DESC(txq); - if (unlikely(free_descs == 0)) - return 0; - - pkt = *tx_pkts; - - i = 0; - issued_descs = 0; - PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n", - txq->qidx, nb_pkts); - /* Sending packets */ - while ((i < nb_pkts) && free_descs) { - /* Grabbing the mbuf linked to the current descriptor */ - lmbuf = &txq->txbufs[txq->tail].mbuf; - /* Warming the cache for releasing the mbuf later on */ - RTE_MBUF_PREFETCH_TO_FREE(*lmbuf); - - pkt = *(tx_pkts + i); - - if (unlikely((pkt->nb_segs > 1) && - !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) { - PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n"); - rte_panic("Multisegment packet unsupported\n"); - } - - /* Checking if we have enough descriptors */ - if (unlikely(pkt->nb_segs > free_descs)) - goto xmit_end; - - /* - * Checksum and VLAN flags just in the first descriptor for a - * multisegment packet - */ - nfp_net_tx_cksum(txq, txds, pkt); - - if ((pkt->ol_flags & PKT_TX_VLAN_PKT) && - (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) { - txds->flags |= PCIE_DESC_TX_VLAN; - txds->vlan = pkt->vlan_tci; - } - - if (pkt->ol_flags & PKT_TX_TCP_SEG) - rte_panic("TSO is not supported\n"); - - /* - * mbuf data_len is the data in one segment and pkt_len data - * in the whole packet. 
When the packet is just one segment, - * then data_len = pkt_len - */ - pkt_size = pkt->pkt_len; - - /* Releasing mbuf which was prefetched above */ - if (*lmbuf) - rte_pktmbuf_free(*lmbuf); - /* - * Linking mbuf with descriptor for being released - * next time descriptor is used - */ - *lmbuf = pkt; - - while (pkt_size) { - dma_size = pkt->data_len; - dma_addr = rte_mbuf_data_dma_addr(pkt); - PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:" - "%" PRIx64 "\n", dma_addr); - - /* Filling descriptors fields */ - txds->dma_len = dma_size; - txds->data_len = pkt->pkt_len; - txds->dma_addr_hi = (dma_addr >> 32) & 0xff; - txds->dma_addr_lo = (dma_addr & 0xffffffff); - ASSERT(free_descs > 0); - free_descs--; - - txq->wr_p++; - txq->tail++; - if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/ - txq->tail = 0; - - pkt_size -= dma_size; - if (!pkt_size) { - /* End of packet */ - txds->offset_eop |= PCIE_DESC_TX_EOP; - } else { - txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK; - pkt = pkt->next; - } - /* Referencing next free TX descriptor */ - txds = &txq->txds[txq->tail]; - issued_descs++; - } - i++; - } - -xmit_end: - /* Increment write pointers. Force memory write before we let HW know */ - rte_wmb(); - nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs); - - return i; -} - -static void nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) { uint32_t new_ctrl, update; struct nfp_net_hw *hw; + int ret; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); new_ctrl = 0; - if ((mask & ETH_VLAN_FILTER_OFFLOAD) || - (mask & ETH_VLAN_FILTER_OFFLOAD)) - RTE_LOG(INFO, PMD, "Not support for ETH_VLAN_FILTER_OFFLOAD or" - " ETH_VLAN_FILTER_EXTEND"); - /* Enable vlan strip if it is not configured yet */ if ((mask & ETH_VLAN_STRIP_OFFLOAD) && !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) @@ -2052,36 +1310,32 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN; if (new_ctrl == 0) - return; + return 0; update = NFP_NET_CFG_UPDATE_GEN; - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) - return; + ret = nfp_net_reconfig(hw, new_ctrl, update); + if (!ret) + hw->ctrl = new_ctrl; - hw->ctrl = new_ctrl; + return ret; } -/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */ static int -nfp_net_reta_update(struct rte_eth_dev *dev, +nfp_net_rss_reta_write(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { uint32_t reta, mask; int i, j; int idx, shift; - uint32_t update; struct nfp_net_hw *hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) - return -EINVAL; - if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { - RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } @@ -2111,8 +1365,29 @@ nfp_net_reta_update(struct rte_eth_dev *dev, reta &= ~(0xFF << (8 * j)); reta |= reta_conf[idx].reta[shift + j] << (8 * j); } - nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta); + nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, + reta); } + return 0; +} + +/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */ +int +nfp_net_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct nfp_net_hw *hw = + 
NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t update; + int ret; + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return -EINVAL; + + ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size); + if (ret != 0) + return ret; update = NFP_NET_CFG_UPDATE_RSS; @@ -2123,7 +1398,7 @@ nfp_net_reta_update(struct rte_eth_dev *dev, } /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */ -static int +int nfp_net_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) @@ -2139,9 +1414,9 @@ nfp_net_reta_query(struct rte_eth_dev *dev, return -EINVAL; if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { - RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } @@ -2158,11 +1433,12 @@ nfp_net_reta_query(struct rte_eth_dev *dev, if (!mask) continue; - reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift); + reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + + shift); for (j = 0; j < 4; j++) { if (!(mask & (0x1 << j))) continue; - reta_conf->reta[shift + j] = + reta_conf[idx].reta[shift + j] = (uint8_t)((reta >> (8 * j)) & 0xF); } } @@ -2170,14 +1446,61 @@ nfp_net_reta_query(struct rte_eth_dev *dev, } static int -nfp_net_rss_hash_update(struct rte_eth_dev *dev, +nfp_net_rss_hash_write(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { - uint32_t update; + struct nfp_net_hw *hw; + uint64_t rss_hf; uint32_t cfg_rss_ctrl = 0; uint8_t key; - uint64_t rss_hf; int i; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Writing the key byte a byte */ + for (i = 0; i < rss_conf->rss_key_len; i++) { + memcpy(&key, &rss_conf->rss_key[i], 1); + nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key); + } + + rss_hf = rss_conf->rss_hf; + + if (rss_hf & ETH_RSS_IPV4) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP; + + if (rss_hf & ETH_RSS_IPV6) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6; + + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP; + + if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP; + + cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK; + cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ; + + /* configuring where to apply the RSS hash */ + nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl); + + /* Writing the key size */ + nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len); + + return 0; +} + +int +nfp_net_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + uint32_t update; + uint64_t rss_hf; struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -2187,38 +1510,18 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, /* Checking if RSS is enabled */ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) { if (rss_hf != 0) { /* Enable RSS? 
*/ - RTE_LOG(ERR, PMD, "RSS unsupported\n"); + PMD_DRV_LOG(ERR, "RSS unsupported"); return -EINVAL; } return 0; /* Nothing to do */ } if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) { - RTE_LOG(ERR, PMD, "hash key too long\n"); + PMD_DRV_LOG(ERR, "hash key too long"); return -EINVAL; } - if (rss_hf & ETH_RSS_IPV4) - cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 | - NFP_NET_CFG_RSS_IPV4_TCP | - NFP_NET_CFG_RSS_IPV4_UDP; - - if (rss_hf & ETH_RSS_IPV6) - cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 | - NFP_NET_CFG_RSS_IPV6_TCP | - NFP_NET_CFG_RSS_IPV6_UDP; - - /* configuring where to apply the RSS hash */ - nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl); - - /* Writing the key byte a byte */ - for (i = 0; i < rss_conf->rss_key_len; i++) { - memcpy(&key, &rss_conf->rss_key[i], 1); - nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key); - } - - /* Writing the key size */ - nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len); + nfp_net_rss_hash_write(dev, rss_conf); update = NFP_NET_CFG_UPDATE_RSS; @@ -2228,7 +1531,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, return 0; } -static int +int nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { @@ -2264,6 +1567,9 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP; + /* Propagate current RSS hash functions to caller */ + rss_conf->rss_hf = rss_hf; + /* Reading the key size */ rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ); @@ -2276,11 +1582,54 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, return 0; } +int +nfp_net_rss_config_default(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *dev_conf; + struct rte_eth_rss_conf rss_conf; + struct rte_eth_rss_reta_entry64 nfp_reta_conf[2]; + uint16_t rx_queues = dev->data->nb_rx_queues; + uint16_t queue; + int i, j, ret; + + PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues", + rx_queues); + + nfp_reta_conf[0].mask = ~0x0; + nfp_reta_conf[1].mask = ~0x0; + + queue = 0; + for (i = 0; i < 0x40; i += 8) { + for (j = i; j < (i + 8); j++) { + nfp_reta_conf[0].reta[j] = queue; + nfp_reta_conf[1].reta[j] = queue++; + queue %= rx_queues; + } + } + ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80); + if (ret != 0) + return ret; + + dev_conf = &dev->data->dev_conf; + if (!dev_conf) { + PMD_DRV_LOG(INFO, "wrong rss conf"); + return -EINVAL; + } + rss_conf = dev_conf->rx_adv_conf.rss_conf; + + ret = nfp_net_rss_hash_write(dev, &rss_conf); + + return ret; +} + + /* Initialise and register driver with DPDK Application */ static const struct eth_dev_ops nfp_net_eth_dev_ops = { .dev_configure = nfp_net_configure, .dev_start = nfp_net_start, .dev_stop = nfp_net_stop, + .dev_set_link_up = nfp_net_set_link_up, + .dev_set_link_down = nfp_net_set_link_down, .dev_close = nfp_net_close, .promiscuous_enable = nfp_net_promisc_enable, .promiscuous_disable = nfp_net_promisc_disable, @@ -2290,6 +1639,7 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = { .dev_infos_get = nfp_net_infos_get, .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, .mtu_set = nfp_net_dev_mtu_set, + .mac_addr_set = nfp_set_mac_addr, .vlan_offload_set = nfp_net_vlan_offload_set, .reta_update = nfp_net_reta_update, .reta_query = nfp_net_reta_query, @@ -2297,26 +1647,64 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = { .rss_hash_conf_get = nfp_net_rss_hash_conf_get, .rx_queue_setup = nfp_net_rx_queue_setup, .rx_queue_release = nfp_net_rx_queue_release, - 
.rx_queue_count = nfp_net_rx_queue_count, .tx_queue_setup = nfp_net_tx_queue_setup, .tx_queue_release = nfp_net_tx_queue_release, + .rx_queue_intr_enable = nfp_rx_queue_intr_enable, + .rx_queue_intr_disable = nfp_rx_queue_intr_disable, }; + static int nfp_net_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; + struct nfp_pf_dev *pf_dev; struct nfp_net_hw *hw; - uint32_t tx_bar_off, rx_bar_off; + uint64_t tx_bar_off = 0, rx_bar_off = 0; uint32_t start_q; int stride = 4; + int port = 0; + int err; PMD_INIT_FUNC_TRACE(); - hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + /* Use backpointer here to the PF of this eth_dev */ + pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private); + + /* NFP can not handle DMA addresses requiring more than 40 bits */ + if (rte_mem_check_dma_mask(40)) { + RTE_LOG(ERR, PMD, "device %s can not be used:", + pci_dev->device.name); + RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n"); + return -ENODEV; + }; + + if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) || + (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) { + port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; + if (port < 0 || port > 7) { + PMD_DRV_LOG(ERR, "Port value is wrong"); + return -ENODEV; + } + + /* Use PF array of physical ports to get pointer to + * this specific port + */ + hw = pf_dev->ports[port]; + + PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, " + "NFP internal port number: %d", + port, hw->nfp_idx); + + } else { + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + } eth_dev->dev_ops = &nfp_net_eth_dev_ops; + eth_dev->rx_queue_count = nfp_net_rx_queue_count; eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts; @@ -2324,7 +1712,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - pci_dev = eth_dev->pci_dev; rte_eth_copy_pci_info(eth_dev, pci_dev); hw->device_id = pci_dev->id.device_id; @@ -2332,40 +1719,68 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->subsystem_device_id = pci_dev->id.subsystem_device_id; hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; - PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n", + PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u", pci_dev->id.vendor_id, pci_dev->id.device_id, pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function); hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; if (hw->ctrl_bar == NULL) { - RTE_LOG(ERR, PMD, - "hw->ctrl_bar is NULL. BAR0 not configured\n"); + PMD_DRV_LOG(ERR, + "hw->ctrl_bar is NULL. BAR0 not configured"); return -ENODEV; } + + if (hw->is_phyport) { + if (port == 0) { + hw->ctrl_bar = pf_dev->ctrl_bar; + } else { + if (!pf_dev->ctrl_bar) + return -ENODEV; + /* Use port offset in pf ctrl_bar for this + * ports control bar + */ + hw->ctrl_bar = pf_dev->ctrl_bar + + (port * NFP_PF_CSR_SLICE_SIZE); + } + } + + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); + hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS); hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS); /* Work out where in the BAR the queues start. 
*/ switch (pci_dev->id.device_id) { + case PCI_DEVICE_ID_NFP4000_PF_NIC: + case PCI_DEVICE_ID_NFP6000_PF_NIC: case PCI_DEVICE_ID_NFP6000_VF_NIC: start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); - tx_bar_off = NFP_PCIE_QUEUE(start_q); + tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); - rx_bar_off = NFP_PCIE_QUEUE(start_q); + rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; break; default: - RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n"); - return -ENODEV; + PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); + err = -ENODEV; + goto dev_err_ctrl_map; } - PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off); - PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off); + PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off); + PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off); - hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off; - hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off; + if (hw->is_phyport) { + hw->tx_bar = pf_dev->hw_queues + tx_bar_off; + hw->rx_bar = pf_dev->hw_queues + rx_bar_off; + eth_dev->data->dev_private = hw; + } else { + hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + + tx_bar_off; + hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + + rx_bar_off; + } - PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n", + PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", hw->ctrl_bar, hw->tx_bar, hw->rx_bar); nfp_net_cfg_queue_setup(hw); @@ -2374,51 +1789,79 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); - hw->mtu = hw->max_mtu; + hw->mtu = RTE_ETHER_MTU; + + /* VLAN insertion is incompatible with LSOv2 */ + if (hw->cap & NFP_NET_CFG_CTRL_LSO2) + hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) hw->rx_offset = NFP_NET_RX_OFFSET; else hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR); - PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n", - hw->ver, hw->max_mtu); - PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap, + PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d", + NFD_CFG_MAJOR_VERSION_of(hw->ver), + NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu); + + PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap, hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", + hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", + hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "", hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", + hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "", hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", - hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : ""); + hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS2 ? 
"RSSv2 " : ""); - pci_dev = eth_dev->pci_dev; hw->ctrl = 0; hw->stride_rx = stride; hw->stride_tx = stride; - PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n", + PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u", hw->max_rx_queues, hw->max_tx_queues); /* Initializing spinlock for reconfigs */ rte_spinlock_init(&hw->reconfig_lock); /* Allocating memory for mac addr */ - eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", + RTE_ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to space for MAC address"); - return -ENOMEM; + err = -ENOMEM; + goto dev_err_queues_map; } - /* Using random mac addresses for VFs */ - eth_random_addr(&hw->mac_addr[0]); + if (hw->is_phyport) { + nfp_net_pf_read_mac(pf_dev, port); + nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); + } + + if (!rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)&hw->mac_addr)) { + PMD_INIT_LOG(INFO, "Using random mac address for port %d", + port); + /* Using random mac addresses for VFs */ + rte_eth_random_addr(&hw->mac_addr[0]); + nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); + } /* Copying mac address to DPDK eth_dev struct */ - ether_addr_copy(ð_dev->data->mac_addrs[0], - (struct ether_addr *)hw->mac_addr); + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, + ð_dev->data->mac_addrs[0]); + + if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) + eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR; + + eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " "mac=%02x:%02x:%02x:%02x:%02x:%02x", @@ -2427,68 +1870,500 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2], hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]); - /* Registering LSC interrupt handler */ - rte_intr_callback_register(&pci_dev->intr_handle, - nfp_net_dev_interrupt_handler, - (void *)eth_dev); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Registering LSC interrupt handler */ + rte_intr_callback_register(&pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, + (void *)eth_dev); + /* Telling the firmware about the LSC interrupt entry */ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + /* Recording current stats counters values */ + nfp_net_stats_reset(eth_dev); + } + + return 0; + +dev_err_queues_map: + nfp_cpp_area_free(hw->hwqueues_area); +dev_err_ctrl_map: + nfp_cpp_area_free(hw->ctrl_area); + + return err; +} + +#define DEFAULT_FW_PATH "/lib/firmware/netronome" + +static int +nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +{ + struct nfp_cpp *cpp = nsp->cpp; + void *fw_buf; + char fw_name[125]; + char serial[40]; + size_t fsize; + + /* Looking for firmware file in order of priority */ + + /* First try to find a firmware image specific for this device */ + snprintf(serial, sizeof(serial), + "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", + cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3], + cpp->serial[4], cpp->serial[5], cpp->interface >> 8, + cpp->interface & 0xff); + + snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, + serial); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) + goto load_fw; + + /* Then try the PCI name */ + snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, + dev->device.name); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", 
fw_name); + if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) + goto load_fw; + + /* Finally try the card type and media */ + snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) { + PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name); + return -ENOENT; + } + +load_fw: + PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu", + fw_name, fsize); + + PMD_DRV_LOG(INFO, "Uploading the firmware ..."); + nfp_nsp_load_fw(nsp, fw_buf, fsize); + PMD_DRV_LOG(INFO, "Done"); + + free(fw_buf); + return 0; +} + +static int +nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp, + struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo) +{ + struct nfp_nsp *nsp; + const char *nfp_fw_model; + char card_desc[100]; + int err = 0; + + nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); + + if (nfp_fw_model) { + PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); + } else { + PMD_DRV_LOG(ERR, "firmware model NOT found"); + return -EIO; + } + + if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { + PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", + nfp_eth_table->count); + return -EIO; + } + + PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", + nfp_eth_table->count); + + PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); + + snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", + nfp_fw_model, nfp_eth_table->count, + nfp_eth_table->ports[0].speed / 1000); + + nsp = nfp_nsp_open(cpp); + if (!nsp) { + PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); + return -EIO; + } + + nfp_nsp_device_soft_reset(nsp); + err = nfp_fw_upload(dev, nsp, card_desc); + + nfp_nsp_close(nsp); + return err; +} + +static int nfp_init_phyports(struct nfp_pf_dev *pf_dev) +{ + struct nfp_net_hw *hw; + struct rte_eth_dev *eth_dev; + struct nfp_eth_table *nfp_eth_table = NULL; + int ret = 0; + int i; + + nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp); + if (!nfp_eth_table) { + PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); + ret = -EIO; + goto error; + } + + /* Loop through all physical ports on PF */ + for (i = 0; i < pf_dev->total_phyports; i++) { + const unsigned int numa_node = rte_socket_id(); + char port_name[RTE_ETH_NAME_MAX_LEN]; + + snprintf(port_name, sizeof(port_name), "%s_port%d", + pf_dev->pci_dev->device.name, i); + + /* Allocate a eth_dev for this phyport */ + eth_dev = rte_eth_dev_allocate(port_name); + if (!eth_dev) { + ret = -ENODEV; + goto port_cleanup; + } + + /* Allocate memory for this phyport */ + eth_dev->data->dev_private = + rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw), + RTE_CACHE_LINE_SIZE, numa_node); + if (!eth_dev->data->dev_private) { + ret = -ENOMEM; + rte_eth_dev_release_port(eth_dev); + goto port_cleanup; + } + + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + /* Add this device to the PF's array of physical ports */ + pf_dev->ports[i] = hw; + + hw->pf_dev = pf_dev; + hw->cpp = pf_dev->cpp; + hw->eth_dev = eth_dev; + hw->idx = i; + hw->nfp_idx = nfp_eth_table->ports[i].index; + hw->is_phyport = true; + + eth_dev->device = &pf_dev->pci_dev->device; + + /* ctrl/tx/rx BAR mappings and remaining init happens in + * nfp_net_init + */ + ret = nfp_net_init(eth_dev); + + if (ret) { + ret = -ENODEV; + goto port_cleanup; + } + + rte_eth_dev_probing_finish(eth_dev); + + } /* End loop, all ports on this PF */ + ret = 0; + goto eth_table_cleanup; + 
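/*
 * The loop above registers one ethdev per physical port, named
 * "<PCI device name>_port<N>". A minimal sketch (not part of the patch) of
 * how an application might look one of them up after probing; the PCI
 * address below is a placeholder:
 *
 *     uint16_t port_id;
 *     char name[RTE_ETH_NAME_MAX_LEN];
 *
 *     snprintf(name, sizeof(name), "%s_port%d", "0000:04:00.0", 0);
 *     if (rte_eth_dev_get_port_by_name(name, &port_id) == 0)
 *         printf("phyport 0 is ethdev port %u\n", port_id);
 */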
+port_cleanup: + for (i = 0; i < pf_dev->total_phyports; i++) { + if (pf_dev->ports[i] && pf_dev->ports[i]->eth_dev) { + struct rte_eth_dev *tmp_dev; + tmp_dev = pf_dev->ports[i]->eth_dev; + rte_eth_dev_release_port(tmp_dev); + pf_dev->ports[i] = NULL; + } + } +eth_table_cleanup: + free(nfp_eth_table); +error: + return ret; +} + +static int nfp_pf_init(struct rte_pci_device *pci_dev) +{ + struct nfp_pf_dev *pf_dev = NULL; + struct nfp_cpp *cpp; + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; + struct nfp_eth_table *nfp_eth_table = NULL; + char name[RTE_ETH_NAME_MAX_LEN]; + int total_ports; + int ret = -ENODEV; + int err; + + if (!pci_dev) + return ret; + + /* + * When device bound to UIO, the device could be used, by mistake, + * by two DPDK apps, and the UIO driver does not avoid it. This + * could lead to a serious problem when configuring the NFP CPP + * interface. Here we avoid this telling to the CPP init code to + * use a lock file if UIO is being used. + */ + if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) + cpp = nfp_cpp_from_device_name(pci_dev, 0); + else + cpp = nfp_cpp_from_device_name(pci_dev, 1); + + if (!cpp) { + PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); + ret = -EIO; + goto error; + } + + hwinfo = nfp_hwinfo_read(cpp); + if (!hwinfo) { + PMD_INIT_LOG(ERR, "Error reading hwinfo table"); + ret = -EIO; + goto error; + } + + nfp_eth_table = nfp_eth_read_ports(cpp); + if (!nfp_eth_table) { + PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); + ret = -EIO; + goto hwinfo_cleanup; + } + + if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) { + PMD_INIT_LOG(ERR, "Error when uploading firmware"); + ret = -EIO; + goto eth_table_cleanup; + } + + /* Now the symbol table should be there */ + sym_tbl = nfp_rtsym_table_read(cpp); + if (!sym_tbl) { + PMD_INIT_LOG(ERR, "Something is wrong with the firmware" + " symbol table"); + ret = -EIO; + goto eth_table_cleanup; + } + + total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); + if (total_ports != (int)nfp_eth_table->count) { + PMD_DRV_LOG(ERR, "Inconsistent number of ports"); + ret = -EIO; + goto sym_tbl_cleanup; + } + + PMD_INIT_LOG(INFO, "Total physical ports: %d", total_ports); + + if (total_ports <= 0 || total_ports > 8) { + PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value"); + ret = -ENODEV; + goto sym_tbl_cleanup; + } + /* Allocate memory for the PF "device" */ + snprintf(name, sizeof(name), "nfp_pf%d", 0); + pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); + if (!pf_dev) { + ret = -ENOMEM; + goto sym_tbl_cleanup; + } + + /* Populate the newly created PF device */ + pf_dev->cpp = cpp; + pf_dev->hwinfo = hwinfo; + pf_dev->sym_tbl = sym_tbl; + pf_dev->total_phyports = total_ports; + + if (total_ports > 1) + pf_dev->multiport = true; + + pf_dev->pci_dev = pci_dev; + + /* Map the symbol table */ + pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0", + pf_dev->total_phyports * 32768, + &pf_dev->ctrl_area); + if (!pf_dev->ctrl_bar) { + PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar"); + ret = -EIO; + goto pf_cleanup; + } + + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); + + /* configure access to tx/rx vNIC BARs */ + pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0, + NFP_PCIE_QUEUE(0), + NFP_QCP_QUEUE_AREA_SZ, + &pf_dev->hwqueues_area); + if (!pf_dev->hw_queues) { + PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc"); + ret = -EIO; + goto ctrl_area_cleanup; + } + + PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues); + + 
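/*
 * A minimal sketch (not part of the patch) of how the two mappings above are
 * consumed per physical port in nfp_net_init(): each port takes an
 * NFP_PF_CSR_SLICE_SIZE slice of pf_dev->ctrl_bar, and its queue doorbells
 * sit at fixed strides inside pf_dev->hw_queues:
 *
 *     struct nfp_net_hw *hw = pf_dev->ports[port];
 *     uint8_t *port_ctrl = pf_dev->ctrl_bar + port * NFP_PF_CSR_SLICE_SIZE;
 *     uint32_t start_txq = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
 *     uint8_t *tx_qcp = pf_dev->hw_queues +
 *                       (uint64_t)start_txq * NFP_QCP_QUEUE_ADDR_SZ;
 */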
/* Initialize and prep physical ports now + * This will loop through all physical ports + */ + ret = nfp_init_phyports(pf_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Could not create physical ports"); + goto hwqueues_cleanup; + } + + /* register the CPP bridge service here for primary use */ + nfp_register_cpp_service(pf_dev->cpp); + + return 0; + +hwqueues_cleanup: + nfp_cpp_area_free(pf_dev->hwqueues_area); +ctrl_area_cleanup: + nfp_cpp_area_free(pf_dev->ctrl_area); +pf_cleanup: + rte_free(pf_dev); +sym_tbl_cleanup: + free(sym_tbl); +eth_table_cleanup: + free(nfp_eth_table); +hwinfo_cleanup: + free(hwinfo); +error: + return ret; +} + +/* + * When attaching to the NFP4000/6000 PF on a secondary process there + * is no need to initialize the PF again. Only minimal work is required + * here + */ +static int nfp_pf_secondary_init(struct rte_pci_device *pci_dev) +{ + struct nfp_cpp *cpp; + struct nfp_rtsym_table *sym_tbl; + int total_ports; + int i; + int err; + + if (!pci_dev) + return -ENODEV; + + /* + * When device bound to UIO, the device could be used, by mistake, + * by two DPDK apps, and the UIO driver does not avoid it. This + * could lead to a serious problem when configuring the NFP CPP + * interface. Here we avoid this telling to the CPP init code to + * use a lock file if UIO is being used. + */ + if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO) + cpp = nfp_cpp_from_device_name(pci_dev, 0); + else + cpp = nfp_cpp_from_device_name(pci_dev, 1); + + if (!cpp) { + PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); + return -EIO; + } - /* enable uio intr after callback register */ - rte_intr_enable(&pci_dev->intr_handle); + /* + * We don't have access to the PF created in the primary process + * here so we have to read the number of ports from firmware + */ + sym_tbl = nfp_rtsym_table_read(cpp); + if (!sym_tbl) { + PMD_INIT_LOG(ERR, "Something is wrong with the firmware" + " symbol table"); + return -EIO; + } - /* Telling the firmware about the LSC interrupt entry */ - nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); + + for (i = 0; i < total_ports; i++) { + struct rte_eth_dev *eth_dev; + char port_name[RTE_ETH_NAME_MAX_LEN]; + + snprintf(port_name, sizeof(port_name), "%s_port%d", + pci_dev->device.name, i); + + PMD_DRV_LOG(DEBUG, "Secondary attaching to port %s", + port_name); + eth_dev = rte_eth_dev_attach_secondary(port_name); + if (!eth_dev) { + RTE_LOG(ERR, EAL, + "secondary process attach failed, " + "ethdev doesn't exist"); + return -ENODEV; + } + eth_dev->process_private = cpp; + eth_dev->dev_ops = &nfp_net_eth_dev_ops; + eth_dev->rx_queue_count = nfp_net_rx_queue_count; + eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; + eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts; + rte_eth_dev_probing_finish(eth_dev); + } - /* Recording current stats counters values */ - nfp_net_stats_reset(eth_dev); + /* Register the CPP bridge service for the secondary too */ + nfp_register_cpp_service(cpp); return 0; } -static struct rte_pci_id pci_id_nfp_net_map[] = { +static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *dev) +{ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + return nfp_pf_init(dev); + else + return nfp_pf_secondary_init(dev); +} + +static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, - PCI_DEVICE_ID_NFP6000_PF_NIC) + PCI_DEVICE_ID_NFP4000_PF_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, - PCI_DEVICE_ID_NFP6000_VF_NIC) + 
PCI_DEVICE_ID_NFP6000_PF_NIC) }, { .vendor_id = 0, }, }; -static struct eth_driver rte_nfp_net_pmd = { - { - .name = "rte_nfp_net_pmd", - .id_table = pci_id_nfp_net_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | - RTE_PCI_DRV_DETACHABLE, - }, - .eth_dev_init = nfp_net_init, - .dev_private_size = sizeof(struct nfp_net_adapter), -}; - -static int -nfp_net_pmd_init(const char *name __rte_unused, - const char *params __rte_unused) +static int nfp_pci_uninit(struct rte_eth_dev *eth_dev) { - PMD_INIT_FUNC_TRACE(); - PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n", - NFP_NET_PMD_VERSION); + struct rte_pci_device *pci_dev; + uint16_t port_id; - rte_eth_driver_register(&rte_nfp_net_pmd); - return 0; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + if (pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC || + pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC) { + /* Free up all physical ports under PF */ + RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) + rte_eth_dev_close(port_id); + /* + * Ports can be closed and freed but hotplugging is not + * currently supported + */ + return -ENOTSUP; + } + + /* VF cleanup, just free private port data */ + return nfp_net_close(eth_dev); } -static struct rte_driver rte_nfp_net_driver = { - .type = PMD_PDEV, - .init = nfp_net_pmd_init, -}; +static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit); +} -PMD_REGISTER_DRIVER(rte_nfp_net_driver, nfp); -DRIVER_REGISTER_PCI_TABLE(nfp, pci_id_nfp_net_map); +static struct rte_pci_driver rte_nfp_net_pf_pmd = { + .id_table = pci_id_nfp_pf_net_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = nfp_pf_pci_probe, + .remove = eth_nfp_pci_remove, +}; +RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map); +RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio"); +RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE); +RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE); /* * Local variables: * c-file-style: "Linux"
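For reference, nfp_fw_upload() above searches /lib/firmware/netronome for a
firmware image in three steps: a serial-number specific file, then a
PCI-name specific file, then a file derived from the card model and port
layout. With purely illustrative values (the serial, PCI address and
assembly.partno below are placeholders), the candidate names would be:

    serial-00-15-4d-00-00-01-10-ff.nffw   (device serial + NFP interface id)
    pci-0000:04:00.0.nffw                 (PCI device name)
    nic_AMDA0099-0001_2x25.nffw           (assembly.partno, port count, speed in Gb/s)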