X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=2d20d4ce94378a32bb5e4f66bb18a02cf2949c2b;hb=73cd23dd87e9a870421f260fa61a8fe2741a50ba;hp=f5d33efcfadbe9db881cb8263ee2d8e1de2a83de;hpb=b76fafb174d2cd5247c3573bb3d49444e195e760;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index f5d33efcfa..2d20d4ce94 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1,34 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2014-2018 Netronome Systems, Inc.
  * All rights reserved.
  *
  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *  this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *  notice, this list of conditions and the following disclaimer in the
- *  documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- *  contributors may be used to endorse or promote products derived from this
- *  software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
@@ -81,12 +55,12 @@ static int nfp_net_configure(struct rte_eth_dev *dev);
 static void nfp_net_dev_interrupt_handler(void *param);
 static void nfp_net_dev_interrupt_delayed_handler(void *param);
 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void nfp_net_infos_get(struct rte_eth_dev *dev,
-			      struct rte_eth_dev_info *dev_info);
+static int nfp_net_infos_get(struct rte_eth_dev *dev,
+			     struct rte_eth_dev_info *dev_info);
 static int nfp_net_init(struct rte_eth_dev *eth_dev);
 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
-static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
+static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
+static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
				       uint16_t queue_idx);
@@ -105,7 +79,7 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 static int nfp_net_start(struct rte_eth_dev *dev);
 static int nfp_net_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats);
-static void nfp_net_stats_reset(struct rte_eth_dev *dev);
+static int nfp_net_stats_reset(struct rte_eth_dev *dev);
 static void nfp_net_stop(struct rte_eth_dev *dev);
 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
@@ -407,6 +381,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
+	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
 	/* Checking TX mode */
 	if (txmode->mq_mode) {
 		PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
@@ -931,11 +908,12 @@ nfp_net_close(struct rte_eth_dev *dev)
 	 */
 }
 
-static void
+static int
 nfp_net_promisc_enable(struct rte_eth_dev *dev)
 {
 	uint32_t new_ctrl, update = 0;
 	struct nfp_net_hw *hw;
+	int ret;
 
 	PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
 
@@ -943,12 +921,12 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
 
 	if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
 		PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
-		return;
+		return -ENOTSUP;
 	}
 
 	if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
-		return;
+		return 0;
 	}
 
 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
@@ -958,23 +936,27 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
 	 * DPDK sets promiscuous mode on just after this call assuming
 	 * it can not fail ...
 	 */
-	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-		return;
+	ret = nfp_net_reconfig(hw, new_ctrl, update);
+	if (ret < 0)
+		return ret;
 
 	hw->ctrl = new_ctrl;
+
+	return 0;
 }
 
-static void
+static int
 nfp_net_promisc_disable(struct rte_eth_dev *dev)
 {
 	uint32_t new_ctrl, update = 0;
 	struct nfp_net_hw *hw;
+	int ret;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
-		return;
+		return 0;
 	}
 
 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
@@ -984,10 +966,13 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
 	 * DPDK sets promiscuous mode off just before this call
 	 * assuming it can not fail ...
 	 */
-	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-		return;
+	ret = nfp_net_reconfig(hw, new_ctrl, update);
+	if (ret < 0)
+		return ret;
 
 	hw->ctrl = new_ctrl;
+
+	return 0;
 }
 
 /*
@@ -1141,7 +1126,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	return -EINVAL;
 }
 
-static void
+static int
 nfp_net_stats_reset(struct rte_eth_dev *dev)
 {
 	int i;
@@ -1202,9 +1187,11 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 
 	hw->eth_stats_base.imissed =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+
+	return 0;
 }
 
-static void
+static int
 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct nfp_net_hw *hw;
@@ -1226,7 +1213,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
					     DEV_RX_OFFLOAD_UDP_CKSUM |
					     DEV_RX_OFFLOAD_TCP_CKSUM;
 
-	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
+				     DEV_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
 		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
@@ -1275,6 +1263,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+
+	return 0;
 }
 
 static const uint32_t *
@@ -1387,9 +1377,9 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			    dev->data->port_id);
 
-	PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-		pci_dev->addr.domain, pci_dev->addr.bus,
-		pci_dev->addr.devid, pci_dev->addr.function);
+	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
+		    pci_dev->addr.domain, pci_dev->addr.bus,
+		    pci_dev->addr.devid, pci_dev->addr.function);
 }
 
 /* Interrupt configuration and handling */
@@ -1412,7 +1402,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
 	if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
 		/* If MSI-X auto-masking is used, clear the entry */
 		rte_wmb();
-		rte_intr_enable(&pci_dev->intr_handle);
+		rte_intr_ack(&pci_dev->intr_handle);
 	} else {
 		/* Make sure all updates are written before un-masking */
 		rte_wmb();
@@ -1470,7 +1460,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
 	nfp_net_link_update(dev, 0);
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 
 	nfp_net_dev_link_status_print(dev);
 
@@ -2363,11 +2353,6 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	new_ctrl = 0;
 
-	if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
-	    (mask & ETH_VLAN_EXTEND_OFFLOAD))
-		PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or"
-			" ETH_VLAN_EXTEND_OFFLOAD");
-
 	/* Enable vlan strip if it is not configured yet */
 	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
@@ -2636,6 +2621,9 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
 		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
 
+	/* Propagate current RSS hash functions to caller */
+	rss_conf->rss_hf = rss_hf;
+
 	/* Reading the key size */
 	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
 
@@ -2713,7 +2701,6 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
 	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
 	.rx_queue_setup		= nfp_net_rx_queue_setup,
 	.rx_queue_release	= nfp_net_rx_queue_release,
-	.rx_queue_count		= nfp_net_rx_queue_count,
 	.tx_queue_setup		= nfp_net_tx_queue_setup,
 	.tx_queue_release	= nfp_net_tx_queue_release,
 	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
@@ -2797,6 +2784,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	}
 
 	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
 	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
 	eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
 
@@ -3024,7 +3012,7 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 	size_t count, curlen, totlen = 0;
 	int err = 0;
 
-	PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
 		sizeof(off_t), sizeof(size_t));
 
 	/* Reading the count param */
@@ -3043,9 +3031,9 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 	cpp_id = (offset >> 40) << 8;
 	nfp_offset = offset & ((1ull << 40) - 1);
 
-	PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
+	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
 		offset);
-	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
 		cpp_id, nfp_offset);
 
 	/* Adjust length if not aligned */
@@ -3077,12 +3065,12 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 		if (len > sizeof(tmpbuf))
 			len = sizeof(tmpbuf);
 
-		PMD_CPP_LOG(DEBUG, "%s: Receive %u of %lu\n", __func__,
+		PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__,
			   len, count);
 		err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
 		if (err != (int)len) {
 			RTE_LOG(ERR, PMD,
-				"%s: error when receiving, %d of %lu\n",
+				"%s: error when receiving, %d of %zu\n",
 				__func__, err, count);
 			nfp_cpp_area_release(area);
 			nfp_cpp_area_free(area);
@@ -3126,7 +3114,7 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 	size_t count, curlen, totlen = 0;
 	int err = 0;
 
-	PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
 		sizeof(off_t), sizeof(size_t));
 
 	/* Reading the count param */
@@ -3145,9 +3133,9 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 	cpp_id = (offset >> 40) << 8;
 	nfp_offset = offset & ((1ull << 40) - 1);
 
-	PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
+	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
 		offset);
-	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
 		cpp_id, nfp_offset);
 
 	/* Adjust length if not aligned */
@@ -3184,13 +3172,13 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 			nfp_cpp_area_free(area);
 			return -EIO;
 		}
-		PMD_CPP_LOG(DEBUG, "%s: sending %u of %lu\n", __func__,
+		PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__,
			   len, count);
 
 		err = send(sockfd, tmpbuf, len, 0);
 		if (err != (int)len) {
 			RTE_LOG(ERR, PMD,
-				"%s: error when sending: %d of %lu\n",
+				"%s: error when sending: %d of %zu\n",
 				__func__, err, count);
 			nfp_cpp_area_release(area);
 			nfp_cpp_area_free(area);
@@ -3461,9 +3449,10 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
 probe_failed:
 	rte_free(port_name);
 	/* free ports private data if primary process */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		rte_free(eth_dev->data->dev_private);
-
+		eth_dev->data->dev_private = NULL;
+	}
 	rte_eth_dev_release_port(eth_dev);
 
 	return retval;
@@ -3625,7 +3614,7 @@ static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	 * interface. Here we avoid this telling to the CPP init code to
 	 * use a lock file if UIO is being used.
 	 */
-	if (dev->kdrv == RTE_KDRV_VFIO)
+	if (dev->kdrv == RTE_PCI_KDRV_VFIO)
 		cpp = nfp_cpp_from_device_name(dev, 0);
 	else
 		cpp = nfp_cpp_from_device_name(dev, 1);
@@ -3692,9 +3681,6 @@ error:
 	return ret;
 }
 
-int nfp_logtype_init;
-int nfp_logtype_driver;
-
 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
@@ -3778,16 +3764,8 @@ RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
-
-RTE_INIT(nfp_init_log)
-{
-	nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
-	if (nfp_logtype_init >= 0)
-		rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
-	nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
-	if (nfp_logtype_driver >= 0)
-		rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(nfp_logtype_init, pmd.net.nfp.init, NOTICE);
+RTE_LOG_REGISTER(nfp_logtype_driver, pmd.net.nfp.driver, NOTICE);
 /*
  * Local variables:
  * c-file-style: "Linux"