X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=88e3f01d6ff4f572d1a0ef382ca097df666d2b6c;hb=9c99878aa1b16de26fcce82c112b401766dd910e;hp=1b7b6c2fd491218c8af272aae4c9a726d3f62fc2;hpb=027412fe949c763fd4d536b13dcb4432f2df5534;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 1b7b6c2fd4..88e3f01d6f 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1,34 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2014-2018 Netronome Systems, Inc.
  * All rights reserved.
  *
  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *  this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *  notice, this list of conditions and the following disclaimer in the
- *  documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- *  contributors may be used to endorse or promote products derived from this
- *  software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
@@ -81,12 +55,12 @@ static int nfp_net_configure(struct rte_eth_dev *dev);
 static void nfp_net_dev_interrupt_handler(void *param);
 static void nfp_net_dev_interrupt_delayed_handler(void *param);
 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void nfp_net_infos_get(struct rte_eth_dev *dev,
-			      struct rte_eth_dev_info *dev_info);
+static int nfp_net_infos_get(struct rte_eth_dev *dev,
+			     struct rte_eth_dev_info *dev_info);
 static int nfp_net_init(struct rte_eth_dev *eth_dev);
 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
-static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
+static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
+static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
 				       uint16_t queue_idx);
@@ -105,7 +79,7 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 static int nfp_net_start(struct rte_eth_dev *dev);
 static int nfp_net_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
-static void nfp_net_stats_reset(struct rte_eth_dev *dev);
+static int nfp_net_stats_reset(struct rte_eth_dev *dev);
 static void nfp_net_stop(struct rte_eth_dev *dev);
 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
@@ -119,7 +93,7 @@ static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 			struct rte_eth_rss_conf *rss_conf);
 static int nfp_set_mac_addr(struct rte_eth_dev *dev,
-			    struct ether_addr *mac_addr);
+			    struct rte_ether_addr *mac_addr);
 
 /* The offset of the queue controller queues in the PCIe Target */
 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
@@ -407,6 +381,9 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
 
+	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
 	/* Checking TX mode */
 	if (txmode->mq_mode) {
 		PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
@@ -553,7 +530,7 @@ nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
 }
 
 int
-nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 {
 	struct nfp_net_hw *hw;
 	uint32_t update, ctrl;
@@ -770,7 +747,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 		return -EIO;
 
 	/*
-	 * Allocating rte mbuffs for configured rx queues.
+	 * Allocating rte mbufs for configured rx queues.
 	 * This requires queues being enabled before
 	 */
 	if (nfp_net_rx_freelist_setup(dev) < 0) {
@@ -844,6 +821,48 @@ nfp_net_stop(struct rte_eth_dev *dev)
 	}
 }
 
+/* Set the link up. */
+static int
+nfp_net_set_link_up(struct rte_eth_dev *dev)
+{
+	struct nfp_net_hw *hw;
+
+	PMD_DRV_LOG(DEBUG, "Set link up");
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (!hw->is_pf)
+		return -ENOTSUP;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		/* Configure the physical port down */
+		return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
+	else
+		return nfp_eth_set_configured(dev->process_private,
+					      hw->pf_port_idx, 1);
+}
+
+/* Set the link down. */
+static int
+nfp_net_set_link_down(struct rte_eth_dev *dev)
+{
+	struct nfp_net_hw *hw;
+
+	PMD_DRV_LOG(DEBUG, "Set link down");
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (!hw->is_pf)
+		return -ENOTSUP;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		/* Configure the physical port down */
+		return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
+	else
+		return nfp_eth_set_configured(dev->process_private,
+					      hw->pf_port_idx, 0);
+}
+
 /* Reset and stop device. The device can not be restarted. */
 static void
 nfp_net_close(struct rte_eth_dev *dev)
@@ -889,11 +908,12 @@ nfp_net_close(struct rte_eth_dev *dev)
 	 */
 }
 
-static void
+static int
 nfp_net_promisc_enable(struct rte_eth_dev *dev)
 {
 	uint32_t new_ctrl, update = 0;
 	struct nfp_net_hw *hw;
+	int ret;
 
 	PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
 
@@ -901,12 +921,12 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
 
 	if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
 		PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
-		return;
+		return -ENOTSUP;
 	}
 
 	if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
-		return;
+		return 0;
 	}
 
 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
@@ -916,23 +936,27 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
 	 * DPDK sets promiscuous mode on just after this call assuming
 	 * it can not fail ...
 	 */
-	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-		return;
+	ret = nfp_net_reconfig(hw, new_ctrl, update);
+	if (ret < 0)
+		return ret;
 
 	hw->ctrl = new_ctrl;
+
+	return 0;
 }
 
-static void
+static int
 nfp_net_promisc_disable(struct rte_eth_dev *dev)
 {
 	uint32_t new_ctrl, update = 0;
 	struct nfp_net_hw *hw;
+	int ret;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
-		return;
+		return 0;
 	}
 
 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
@@ -942,10 +966,13 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
 	 * DPDK sets promiscuous mode off just before this call
 	 * assuming it can not fail ...
 	 */
-	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-		return;
+	ret = nfp_net_reconfig(hw, new_ctrl, update);
+	if (ret < 0)
+		return ret;
 
 	hw->ctrl = new_ctrl;
+
+	return 0;
 }
 
 /*
@@ -1099,7 +1126,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	return -EINVAL;
 }
 
-static void
+static int
 nfp_net_stats_reset(struct rte_eth_dev *dev)
 {
 	int i;
@@ -1160,9 +1187,11 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 
 	hw->eth_stats_base.imissed =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+
+	return 0;
 }
 
-static void
+static int
 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct nfp_net_hw *hw;
@@ -1171,7 +1200,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
-	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 	dev_info->max_rx_pktlen = hw->max_mtu;
 	/* Next should change when PF support is implemented */
 	dev_info->max_mac_addrs = 1;
@@ -1184,7 +1213,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 					     DEV_RX_OFFLOAD_UDP_CKSUM |
 					     DEV_RX_OFFLOAD_TCP_CKSUM;
 
-	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
+				     DEV_RX_OFFLOAD_RSS_HASH;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
 		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
@@ -1233,6 +1263,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
 			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
 			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+
+	return 0;
 }
 
 static const uint32_t *
@@ -1345,9 +1377,9 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
 			    dev->data->port_id);
 
-	PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-		pci_dev->addr.domain, pci_dev->addr.bus,
-		pci_dev->addr.devid, pci_dev->addr.function);
+	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
+		    pci_dev->addr.domain, pci_dev->addr.bus,
+		    pci_dev->addr.devid, pci_dev->addr.function);
 }
 
 /* Interrupt configuration and handling */
@@ -1370,7 +1402,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
 	if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
 		/* If MSI-X auto-masking is used, clear the entry */
 		rte_wmb();
-		rte_intr_enable(&pci_dev->intr_handle);
+		rte_intr_ack(&pci_dev->intr_handle);
 	} else {
 		/* Make sure all updates are written before un-masking */
 		rte_wmb();
@@ -1444,7 +1476,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/* check that mtu is within the allowed range */
-	if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
+	if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu)
 		return -EINVAL;
 
 	/* mtu setting is forbidden if port is started */
@@ -1455,7 +1487,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	}
 
 	/* switch to jumbo mode if needed */
-	if ((uint32_t)mtu > ETHER_MAX_LEN)
+	if ((uint32_t)mtu > RTE_ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
 		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -1509,7 +1541,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	if (rxq == NULL)
 		return -ENOMEM;
 
-	/* Hw queues mapping based on firmware confifguration */
+	/* Hw queues mapping based on firmware configuration */
 	rxq->qidx = queue_idx;
 	rxq->fl_qcidx = queue_idx * hw->stride_rx;
 	rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
 
@@ -1541,7 +1573,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 				   socket_id);
 
 	if (tz == NULL) {
-		PMD_DRV_LOG(ERR, "Error allocatig rx dma");
+		PMD_DRV_LOG(ERR, "Error allocating rx dma");
 		nfp_net_rx_queue_release(rxq);
 		return -ENOMEM;
 	}
@@ -1928,7 +1960,7 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
 /*
  * RX path design:
  *
- * There are some decissions to take:
+ * There are some decisions to take:
  * 1) How to check DD RX descriptors bit
  * 2) How and when to allocate new mbufs
  *
@@ -1998,7 +2030,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rte_rmb();
 
 		/*
-		 * We got a packet. Let's alloc a new mbuff for refilling the
+		 * We got a packet. Let's alloc a new mbuf for refilling the
 		 * free descriptor ring as soon as possible
 		 */
 		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
@@ -2013,8 +2045,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		nb_hold++;
 
 		/*
-		 * Grab the mbuff and refill the descriptor with the
-		 * previously allocated mbuff
+		 * Grab the mbuf and refill the descriptor with the
+		 * previously allocated mbuf
 		 */
 		mb = rxb->mbuf;
 		rxb->mbuf = new_mb;
@@ -2046,7 +2078,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			return -EINVAL;
 		}
 
-		/* Filling the received mbuff with packet info */
+		/* Filling the received mbuf with packet info */
 		if (hw->rx_offset)
 			mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
 		else
@@ -2071,7 +2103,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		}
 
-		/* Adding the mbuff to the mbuff array passed by the app */
+		/* Adding the mbuf to the mbuf array passed by the app */
 		rx_pkts[avail++] = mb;
 
 		/* Now resetting and updating the descriptor */
@@ -2465,7 +2497,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 		for (j = 0; j < 4; j++) {
 			if (!(mask & (0x1 << j)))
 				continue;
-			reta_conf->reta[shift + j] =
+			reta_conf[idx].reta[shift + j] =
 				(uint8_t)((reta >> (8 * j)) & 0xF);
 		}
 	}
@@ -2652,6 +2684,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
 	.dev_configure		= nfp_net_configure,
 	.dev_start		= nfp_net_start,
 	.dev_stop		= nfp_net_stop,
+	.dev_set_link_up	= nfp_net_set_link_up,
+	.dev_set_link_down	= nfp_net_set_link_down,
 	.dev_close		= nfp_net_close,
 	.promiscuous_enable	= nfp_net_promisc_enable,
 	.promiscuous_disable	= nfp_net_promisc_disable,
@@ -2811,9 +2845,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	case PCI_DEVICE_ID_NFP6000_PF_NIC:
 	case PCI_DEVICE_ID_NFP6000_VF_NIC:
 		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
-		tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+		tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
 		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
-		rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+		rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
@@ -2861,7 +2895,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
 	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
-	hw->mtu = ETHER_MTU;
+	hw->mtu = RTE_ETHER_MTU;
 
 	/* VLAN insertion is incompatible with LSOv2 */
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
@@ -2904,7 +2938,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	rte_spinlock_init(&hw->reconfig_lock);
 
 	/* Allocating memory for mac addr */
-	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+					       RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
 		err = -ENOMEM;
@@ -2918,16 +2953,17 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 		nfp_net_vf_read_mac(hw);
 	}
 
-	if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
+	if (!rte_is_valid_assigned_ether_addr(
+		    (struct rte_ether_addr *)&hw->mac_addr)) {
 		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
 				   port);
 		/* Using random mac addresses for VFs */
-		eth_random_addr(&hw->mac_addr[0]);
+		rte_eth_random_addr(&hw->mac_addr[0]);
 		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
 	}
 
 	/* Copying mac address to DPDK eth_dev struct */
-	ether_addr_copy((struct ether_addr *)hw->mac_addr,
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
 			&eth_dev->data->mac_addrs[0]);
 
 	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
@@ -2978,7 +3014,7 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 	size_t count, curlen, totlen = 0;
 	int err = 0;
 
-	PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
 		sizeof(off_t), sizeof(size_t));
 
 	/* Reading the count param */
@@ -2997,9 +3033,9 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 	cpp_id = (offset >> 40) << 8;
 	nfp_offset = offset & ((1ull << 40) - 1);
 
-	PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
+	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
 		offset);
-	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
 		cpp_id, nfp_offset);
 
 	/* Adjust length if not aligned */
@@ -3031,12 +3067,12 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 		if (len > sizeof(tmpbuf))
 			len = sizeof(tmpbuf);
 
-		PMD_CPP_LOG(DEBUG, "%s: Receive %u of %lu\n", __func__,
+		PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__,
 			   len, count);
 		err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
 		if (err != (int)len) {
 			RTE_LOG(ERR, PMD,
-				"%s: error when receiving, %d of %lu\n",
+				"%s: error when receiving, %d of %zu\n",
 				__func__, err, count);
 			nfp_cpp_area_release(area);
 			nfp_cpp_area_free(area);
@@ -3080,7 +3116,7 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 	size_t count, curlen, totlen = 0;
 	int err = 0;
 
-	PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
 		sizeof(off_t), sizeof(size_t));
 
 	/* Reading the count param */
@@ -3099,9 +3135,9 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 	cpp_id = (offset >> 40) << 8;
 	nfp_offset = offset & ((1ull << 40) - 1);
 
-	PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
+	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
 		offset);
-	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
+	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
 		cpp_id, nfp_offset);
 
 	/* Adjust length if not aligned */
@@ -3138,13 +3174,13 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 			nfp_cpp_area_free(area);
 			return -EIO;
 		}
-		PMD_CPP_LOG(DEBUG, "%s: sending %u of %lu\n", __func__,
+		PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__,
 			   len, count);
 
 		err = send(sockfd, tmpbuf, len, 0);
 		if (err != (int)len) {
 			RTE_LOG(ERR, PMD,
-				"%s: error when sending: %d of %lu\n",
+				"%s: error when sending: %d of %zu\n",
 				__func__, err, count);
 			nfp_cpp_area_release(area);
 			nfp_cpp_area_free(area);
@@ -3257,6 +3293,7 @@ nfp_cpp_bridge_service_func(void *args)
 	if (ret < 0) {
 		RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n",
 			__func__, errno);
+		close(sockfd);
 		return ret;
 	}
 
@@ -3264,6 +3301,7 @@ nfp_cpp_bridge_service_func(void *args)
 	if (ret < 0) {
 		RTE_LOG(ERR, PMD, "%s: listen error(%d). Service failed\n",
 			__func__, errno);
+		close(sockfd);
 		return ret;
 	}
 
@@ -3273,6 +3311,7 @@ nfp_cpp_bridge_service_func(void *args)
 			RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n",
 				__func__, errno);
 			RTE_LOG(ERR, PMD, "%s: service failed\n", __func__);
+			close(sockfd);
 			return -EIO;
 		}
 
@@ -3321,9 +3360,9 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
 		return -ENOMEM;
 
 	if (ports > 1)
-		sprintf(port_name, "%s_port%d", dev->device.name, port);
+		snprintf(port_name, 100, "%s_port%d", dev->device.name, port);
 	else
-		sprintf(port_name, "%s", dev->device.name);
+		strlcat(port_name, dev->device.name, 100);
 
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
@@ -3412,9 +3451,10 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
 probe_failed:
 	rte_free(port_name);
 	/* free ports private data if primary process */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		rte_free(eth_dev->data->dev_private);
-
+		eth_dev->data->dev_private = NULL;
+	}
 	rte_eth_dev_release_port(eth_dev);
 
 	return retval;
@@ -3436,28 +3476,31 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
 	/* Looking for firmware file in order of priority */
 
 	/* First try to find a firmware image specific for this device */
-	sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
+	snprintf(serial, sizeof(serial),
+		"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
 		cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
 		cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
 		cpp->interface & 0xff);
 
-	sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
+	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
+			serial);
 
 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
 	fw_f = open(fw_name, O_RDONLY);
-	if (fw_f > 0)
+	if (fw_f >= 0)
 		goto read_fw;
 
 	/* Then try the PCI name */
-	sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name);
+	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
+			dev->device.name);
 
 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
 	fw_f = open(fw_name, O_RDONLY);
-	if (fw_f > 0)
+	if (fw_f >= 0)
 		goto read_fw;
 
 	/* Finally try the card type and media */
-	sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card);
+	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
 	fw_f = open(fw_name, O_RDONLY);
 	if (fw_f < 0) {
@@ -3533,8 +3576,9 @@ nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
 	PMD_DRV_LOG(INFO, "Port speed: %u",
 		    nfp_eth_table->ports[0].speed);
 
-	sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model,
-		nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000);
+	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
+		nfp_fw_model, nfp_eth_table->count,
+		nfp_eth_table->ports[0].speed / 1000);
 
 	nsp = nfp_nsp_open(cpp);
 	if (!nsp) {
@@ -3639,9 +3683,6 @@ error:
 	return ret;
 }
 
-int nfp_logtype_init;
-int nfp_logtype_driver;
-
 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
@@ -3707,16 +3748,14 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
 	.id_table = pci_id_nfp_pf_net_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-		     RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
 	.probe = nfp_pf_pci_probe,
 	.remove = eth_nfp_pci_remove,
 };
 
 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
 	.id_table = pci_id_nfp_vf_net_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-		     RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
 	.probe = eth_nfp_pci_probe,
 	.remove = eth_nfp_pci_remove,
 };
 
@@ -3727,16 +3766,8 @@ RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
-
-RTE_INIT(nfp_init_log)
-{
-	nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
-	if (nfp_logtype_init >= 0)
-		rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
-	nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
-	if (nfp_logtype_driver >= 0)
-		rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(nfp_logtype_init, pmd.net.nfp.init, NOTICE);
+RTE_LOG_REGISTER(nfp_logtype_driver, pmd.net.nfp.driver, NOTICE);
 /*
  * Local variables:
  * c-file-style: "Linux"