X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=3aafa7f80fc942b85630502c571996d7b3cc0a92;hb=ff1097b3e5a18734a0ec5e3a4964306cf803c28f;hp=611a6ee3556ea97e7a126e5b1bf171af08cf1bf6;hpb=968e9c14f3fe51174e8cda7eb9148985f28f1bb3;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 611a6ee355..3aafa7f80f 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1,34 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2014-2018 Netronome Systems, Inc.
  * All rights reserved.
  *
  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- *    contributors may be used to endorse or promote products derived from this
- *    software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
@@ -81,12 +55,12 @@ static int nfp_net_configure(struct rte_eth_dev *dev);
 static void nfp_net_dev_interrupt_handler(void *param);
 static void nfp_net_dev_interrupt_delayed_handler(void *param);
 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void nfp_net_infos_get(struct rte_eth_dev *dev,
-                              struct rte_eth_dev_info *dev_info);
+static int nfp_net_infos_get(struct rte_eth_dev *dev,
+                             struct rte_eth_dev_info *dev_info);
 static int nfp_net_init(struct rte_eth_dev *eth_dev);
 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
-static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
+static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
+static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
                                        uint16_t queue_idx);
@@ -105,7 +79,7 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 static int nfp_net_start(struct rte_eth_dev *dev);
 static int nfp_net_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats);
-static void nfp_net_stats_reset(struct rte_eth_dev *dev);
+static int nfp_net_stats_reset(struct rte_eth_dev *dev);
 static void nfp_net_stop(struct rte_eth_dev *dev);
 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                   uint16_t nb_pkts);
@@ -119,7 +93,7 @@ static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
                         struct rte_eth_rss_conf *rss_conf);
 static int nfp_set_mac_addr(struct rte_eth_dev *dev,
-                            struct ether_addr *mac_addr);
+                            struct rte_ether_addr *mac_addr);
 
 /* The offset of the queue controller queues in the PCIe Target */
 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
@@ -407,6 +381,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
         rxmode = &dev_conf->rxmode;
         txmode = &dev_conf->txmode;
 
+        if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
+                rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
         /* Checking TX mode */
         if (txmode->mq_mode) {
                 PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
@@ -553,7 +530,7 @@ nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
 }
 
 int
-nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 {
         struct nfp_net_hw *hw;
         uint32_t update, ctrl;
@@ -770,7 +747,7 @@ nfp_net_start(struct rte_eth_dev *dev)
                 return -EIO;
 
         /*
-         * Allocating rte mbuffs for configured rx queues.
+         * Allocating rte mbufs for configured rx queues.
          * This requires queues being enabled before
          */
         if (nfp_net_rx_freelist_setup(dev) < 0) {
@@ -844,6 +821,48 @@ nfp_net_stop(struct rte_eth_dev *dev)
         }
 }
 
+/* Set the link up. */
+static int
+nfp_net_set_link_up(struct rte_eth_dev *dev)
+{
+        struct nfp_net_hw *hw;
+
+        PMD_DRV_LOG(DEBUG, "Set link up");
+
+        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+        if (!hw->is_pf)
+                return -ENOTSUP;
+
+        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+                /* Configure the physical port up */
+                return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
+        else
+                return nfp_eth_set_configured(dev->process_private,
+                                              hw->pf_port_idx, 1);
+}
+
+/* Set the link down. */
+static int
+nfp_net_set_link_down(struct rte_eth_dev *dev)
+{
+        struct nfp_net_hw *hw;
+
+        PMD_DRV_LOG(DEBUG, "Set link down");
+
+        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+        if (!hw->is_pf)
+                return -ENOTSUP;
+
+        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+                /* Configure the physical port down */
+                return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
+        else
+                return nfp_eth_set_configured(dev->process_private,
+                                              hw->pf_port_idx, 0);
+}
+
 /* Reset and stop device. The device can not be restarted. */
 static void
 nfp_net_close(struct rte_eth_dev *dev)
@@ -889,11 +908,12 @@ nfp_net_close(struct rte_eth_dev *dev)
         */
 }
 
-static void
+static int
 nfp_net_promisc_enable(struct rte_eth_dev *dev)
 {
         uint32_t new_ctrl, update = 0;
         struct nfp_net_hw *hw;
+        int ret;
 
         PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
 
@@ -901,12 +921,12 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
         if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
                 PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
-                return;
+                return -ENOTSUP;
         }
 
         if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
                 PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
-                return;
+                return 0;
         }
 
         new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 
@@ -916,23 +936,27 @@
         * DPDK sets promiscuous mode on just after this call assuming
         * it can not fail ...
         */
-        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-                return;
+        ret = nfp_net_reconfig(hw, new_ctrl, update);
+        if (ret < 0)
+                return ret;
 
         hw->ctrl = new_ctrl;
+
+        return 0;
 }
 
-static void
+static int
 nfp_net_promisc_disable(struct rte_eth_dev *dev)
 {
         uint32_t new_ctrl, update = 0;
         struct nfp_net_hw *hw;
+        int ret;
 
         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
         if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
                 PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
-                return;
+                return 0;
         }
 
         new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
 
@@ -942,10 +966,13 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
         * DPDK sets promiscuous mode off just before this call
         * assuming it can not fail ...
         */
-        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-                return;
+        ret = nfp_net_reconfig(hw, new_ctrl, update);
+        if (ret < 0)
+                return ret;
 
         hw->ctrl = new_ctrl;
+
+        return 0;
 }
 
 /*
@@ -1099,7 +1126,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
         return -EINVAL;
 }
 
-static void
+static int
 nfp_net_stats_reset(struct rte_eth_dev *dev)
 {
         int i;
@@ -1160,9 +1187,11 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
         hw->eth_stats_base.imissed =
                 nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+
+        return 0;
 }
 
-static void
+static int
 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
         struct nfp_net_hw *hw;
@@ -1171,7 +1200,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
         dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
-        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+        dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
         dev_info->max_rx_pktlen = hw->max_mtu;
         /* Next should change when PF support is implemented */
         dev_info->max_mac_addrs = 1;
 
@@ -1184,7 +1213,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                      DEV_RX_OFFLOAD_UDP_CKSUM |
                                      DEV_RX_OFFLOAD_TCP_CKSUM;
 
-        dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+        dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
+                                     DEV_RX_OFFLOAD_RSS_HASH;
 
         if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
                 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
@@ -1233,6 +1263,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
                                ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
                                ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+
+        return 0;
 }
 
 static const uint32_t *
@@ -1370,7 +1402,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
         if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
                 /* If MSI-X auto-masking is used, clear the entry */
                 rte_wmb();
-                rte_intr_enable(&pci_dev->intr_handle);
+                rte_intr_ack(&pci_dev->intr_handle);
         } else {
                 /* Make sure all updates are written before un-masking */
                 rte_wmb();
@@ -1444,7 +1476,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
         /* check that mtu is within the allowed range */
-        if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
+        if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
                 return -EINVAL;
 
         /* mtu setting is forbidden if port is started */
@@ -1455,7 +1487,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
         }
 
         /* switch to jumbo mode if needed */
-        if ((uint32_t)mtu > ETHER_MAX_LEN)
+        if ((uint32_t)mtu > RTE_ETHER_MAX_LEN)
                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
         else
                 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -1509,7 +1541,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         if (rxq == NULL)
                 return -ENOMEM;
 
-        /* Hw queues mapping based on firmware confifguration */
+        /* Hw queues mapping based on firmware configuration */
         rxq->qidx = queue_idx;
         rxq->fl_qcidx = queue_idx * hw->stride_rx;
         rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
@@ -1541,7 +1573,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
                                    socket_id);
 
         if (tz == NULL) {
-                PMD_DRV_LOG(ERR, "Error allocatig rx dma");
+                PMD_DRV_LOG(ERR, "Error allocating rx dma");
                 nfp_net_rx_queue_release(rxq);
                 return -ENOMEM;
         }
@@ -1928,7 +1960,7 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
 /*
  * RX path design:
  *
- * There are some decissions to take:
+ * There are some decisions to take:
  * 1) How to check DD RX descriptors bit
  * 2) How and when to allocate new mbufs
  *
@@ -1998,7 +2030,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 rte_rmb();
 
                 /*
-                 * We got a packet. Let's alloc a new mbuff for refilling the
+                 * We got a packet. Let's alloc a new mbuf for refilling the
                  * free descriptor ring as soon as possible
                  */
                 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
@@ -2013,8 +2045,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 nb_hold++;
 
                 /*
-                 * Grab the mbuff and refill the descriptor with the
-                 * previously allocated mbuff
+                 * Grab the mbuf and refill the descriptor with the
+                 * previously allocated mbuf
                  */
                 mb = rxb->mbuf;
                 rxb->mbuf = new_mb;
@@ -2046,7 +2078,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                         return -EINVAL;
                 }
 
-                /* Filling the received mbuff with packet info */
+                /* Filling the received mbuf with packet info */
                 if (hw->rx_offset)
                         mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
                 else
@@ -2071,7 +2103,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                         mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                 }
 
-                /* Adding the mbuff to the mbuff array passed by the app */
+                /* Adding the mbuf to the mbuf array passed by the app */
                 rx_pkts[avail++] = mb;
 
                 /* Now resetting and updating the descriptor */
@@ -2652,6 +2684,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
         .dev_configure = nfp_net_configure,
         .dev_start = nfp_net_start,
         .dev_stop = nfp_net_stop,
+        .dev_set_link_up = nfp_net_set_link_up,
+        .dev_set_link_down = nfp_net_set_link_down,
         .dev_close = nfp_net_close,
         .promiscuous_enable = nfp_net_promisc_enable,
         .promiscuous_disable = nfp_net_promisc_disable,
@@ -2811,9 +2845,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
         case PCI_DEVICE_ID_NFP6000_PF_NIC:
         case PCI_DEVICE_ID_NFP6000_VF_NIC:
                 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
-                tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+                tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
                 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
-                rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+                rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
                 break;
         default:
                 PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
@@ -2861,7 +2895,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
         hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
         hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
         hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
-        hw->mtu = ETHER_MTU;
+        hw->mtu = RTE_ETHER_MTU;
 
         /* VLAN insertion is incompatible with LSOv2 */
         if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
@@ -2904,7 +2938,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
         rte_spinlock_init(&hw->reconfig_lock);
 
         /* Allocating memory for mac addr */
-        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+                                               RTE_ETHER_ADDR_LEN, 0);
         if (eth_dev->data->mac_addrs == NULL) {
                 PMD_INIT_LOG(ERR, "Failed to space for MAC address");
                 err = -ENOMEM;
@@ -2918,16 +2953,17 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                 nfp_net_vf_read_mac(hw);
         }
 
-        if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
+        if (!rte_is_valid_assigned_ether_addr(
+                    (struct rte_ether_addr *)&hw->mac_addr)) {
                 PMD_INIT_LOG(INFO, "Using random mac address for port %d",
                              port);
                 /* Using random mac addresses for VFs */
-                eth_random_addr(&hw->mac_addr[0]);
+                rte_eth_random_addr(&hw->mac_addr[0]);
                 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
         }
 
         /* Copying mac address to DPDK eth_dev struct */
-        ether_addr_copy((struct ether_addr *)hw->mac_addr,
+        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
                         &eth_dev->data->mac_addrs[0]);
 
         if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
@@ -3257,6 +3293,7 @@ nfp_cpp_bridge_service_func(void *args)
         if (ret < 0) {
                 RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n",
                         __func__, errno);
+                close(sockfd);
                 return ret;
         }
 
@@ -3264,6 +3301,7 @@ nfp_cpp_bridge_service_func(void *args)
         if (ret < 0) {
                 RTE_LOG(ERR, PMD, "%s: listen error(%d). Service failed\n",
                         __func__, errno);
+                close(sockfd);
                 return ret;
         }
 
@@ -3273,6 +3311,7 @@ nfp_cpp_bridge_service_func(void *args)
                         RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n",
                                 __func__, errno);
                         RTE_LOG(ERR, PMD, "%s: service failed\n", __func__);
+                        close(sockfd);
                         return -EIO;
                 }
 
@@ -3447,7 +3486,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
         PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
         fw_f = open(fw_name, O_RDONLY);
-        if (fw_f > 0)
+        if (fw_f >= 0)
                 goto read_fw;
 
         /* Then try the PCI name */
@@ -3456,7 +3495,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
         PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
         fw_f = open(fw_name, O_RDONLY);
-        if (fw_f > 0)
+        if (fw_f >= 0)
                 goto read_fw;
 
         /* Finally try the card type and media */
@@ -3711,16 +3750,14 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
         .id_table = pci_id_nfp_pf_net_map,
-        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-                     RTE_PCI_DRV_IOVA_AS_VA,
+        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
         .probe = nfp_pf_pci_probe,
         .remove = eth_nfp_pci_remove,
 };
 
 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
         .id_table = pci_id_nfp_vf_net_map,
-        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-                     RTE_PCI_DRV_IOVA_AS_VA,
+        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_nfp_pci_probe,
        .remove = eth_nfp_pci_remove,
 };
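
Most of the churn in this diff is the ethdev API migration in which callbacks such as promiscuous_enable/disable, stats_reset, and dev_infos_get were converted from void to int returns, so a failed hardware reconfiguration now reaches the caller instead of being silently dropped. A minimal sketch of what that buys an application, assuming a DPDK release (19.11 or later) where the generic wrappers like rte_eth_promiscuous_enable() return int; the helper name is hypothetical:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

/* Hypothetical helper: once the PMD callback returns int, the generic
 * wrapper can report why the request failed (e.g. -ENOTSUP when the
 * firmware lacks NFP_NET_CFG_CTRL_PROMISC) instead of claiming
 * success unconditionally. */
static int
enable_promisc_checked(uint16_t port_id)
{
        int ret = rte_eth_promiscuous_enable(port_id);

        if (ret != 0)
                fprintf(stderr, "port %u: promiscuous enable failed: %s\n",
                        port_id, rte_strerror(-ret));
        return ret;
}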
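
The new dev_set_link_up/dev_set_link_down ops wire the PF's physical port into the generic link-control API via nfp_eth_set_configured(); on a VF they return -ENOTSUP. A short usage sketch against the standard ethdev calls; the function name is hypothetical:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: bounce the physical link of a PF port. On NFP VFs both
 * calls fail with -ENOTSUP, so the return codes must be checked. */
static void
bounce_link(uint16_t port_id)
{
        if (rte_eth_dev_set_link_down(port_id) != 0)
                printf("port %u: set link down not supported\n", port_id);
        if (rte_eth_dev_set_link_up(port_id) != 0)
                printf("port %u: set link up not supported\n", port_id);
}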
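
The nfp_net_configure() hunk follows the convention introduced together with DEV_RX_OFFLOAD_RSS_HASH: when the application requests RSS distribution through mq_mode, the PMD turns the hash-delivery offload on itself. A configuration sketch using the pre-20.11 macro names that appear throughout this file; the variable name is illustrative only:

#include <rte_ethdev.h>

/* Sketch: requesting RSS via mq_mode is enough; nfp_net_configure()
 * ORs DEV_RX_OFFLOAD_RSS_HASH into rxmode.offloads itself, and the RX
 * path then fills mbuf->hash.rss for each received packet. */
static const struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_RSS,
        },
        .rx_adv_conf = {
                .rss_conf = {
                        .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
                },
        },
};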