X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=22a8b2d19e8b4f499b9f1505335b0635dbba1853;hb=fb33ac033a87398ccce9b4a8624485be345fdc46;hp=fa7722a47f7b5de8e582d75960a12df2171caa73;hpb=761186fc7ba7fff191f2916734da82e134a9d0a1;p=dpdk.git diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c index fa7722a47f..22a8b2d19e 100644 --- a/drivers/net/nfp/nfp_net.c +++ b/drivers/net/nfp/nfp_net.c @@ -81,12 +81,12 @@ static int nfp_net_configure(struct rte_eth_dev *dev); static void nfp_net_dev_interrupt_handler(void *param); static void nfp_net_dev_interrupt_delayed_handler(void *param); static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -static void nfp_net_infos_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info); +static int nfp_net_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); static int nfp_net_init(struct rte_eth_dev *eth_dev); static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete); -static void nfp_net_promisc_enable(struct rte_eth_dev *dev); -static void nfp_net_promisc_disable(struct rte_eth_dev *dev); +static int nfp_net_promisc_enable(struct rte_eth_dev *dev); +static int nfp_net_promisc_disable(struct rte_eth_dev *dev); static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq); static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx); @@ -105,7 +105,7 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, static int nfp_net_start(struct rte_eth_dev *dev); static int nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); -static void nfp_net_stats_reset(struct rte_eth_dev *dev); +static int nfp_net_stats_reset(struct rte_eth_dev *dev); static void nfp_net_stop(struct rte_eth_dev *dev); static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); @@ -119,7 +119,7 @@ static int nfp_net_rss_reta_write(struct rte_eth_dev *dev, static int nfp_net_rss_hash_write(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); static int nfp_set_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr); + struct rte_ether_addr *mac_addr); /* The offset of the queue controller queues in the PCIe Target */ #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff))) @@ -553,7 +553,7 @@ nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac) } int -nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) +nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) { struct nfp_net_hw *hw; uint32_t update, ctrl; @@ -770,7 +770,7 @@ nfp_net_start(struct rte_eth_dev *dev) return -EIO; /* - * Allocating rte mbuffs for configured rx queues. + * Allocating rte mbufs for configured rx queues. * This requires queues being enabled before */ if (nfp_net_rx_freelist_setup(dev) < 0) { @@ -844,6 +844,48 @@ nfp_net_stop(struct rte_eth_dev *dev) } } +/* Set the link up. */ +static int +nfp_net_set_link_up(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Set link up"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!hw->is_pf) + return -ENOTSUP; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ + return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1); + else + return nfp_eth_set_configured(dev->process_private, + hw->pf_port_idx, 1); +} + +/* Set the link down. 
*/ +static int +nfp_net_set_link_down(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Set link down"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!hw->is_pf) + return -ENOTSUP; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ + return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0); + else + return nfp_eth_set_configured(dev->process_private, + hw->pf_port_idx, 0); +} + /* Reset and stop device. The device can not be restarted. */ static void nfp_net_close(struct rte_eth_dev *dev) @@ -889,11 +931,12 @@ nfp_net_close(struct rte_eth_dev *dev) */ } -static void +static int nfp_net_promisc_enable(struct rte_eth_dev *dev) { uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; + int ret; PMD_DRV_LOG(DEBUG, "Promiscuous mode enable"); @@ -901,12 +944,12 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) { PMD_INIT_LOG(INFO, "Promiscuous mode not supported"); - return; + return -ENOTSUP; } if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) { PMD_DRV_LOG(INFO, "Promiscuous mode already enabled"); - return; + return 0; } new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC; @@ -916,23 +959,27 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) * DPDK sets promiscuous mode on just after this call assuming * it can not fail ... */ - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) - return; + ret = nfp_net_reconfig(hw, new_ctrl, update); + if (ret < 0) + return ret; hw->ctrl = new_ctrl; + + return 0; } -static void +static int nfp_net_promisc_disable(struct rte_eth_dev *dev) { uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; + int ret; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) { PMD_DRV_LOG(INFO, "Promiscuous mode already disabled"); - return; + return 0; } new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC; @@ -942,10 +989,13 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev) * DPDK sets promiscuous mode off just before this call * assuming it can not fail ... 
*/ - if (nfp_net_reconfig(hw, new_ctrl, update) < 0) - return; + ret = nfp_net_reconfig(hw, new_ctrl, update); + if (ret < 0) + return ret; hw->ctrl = new_ctrl; + + return 0; } /* @@ -1099,7 +1149,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) return -EINVAL; } -static void +static int nfp_net_stats_reset(struct rte_eth_dev *dev) { int i; @@ -1160,9 +1210,11 @@ nfp_net_stats_reset(struct rte_eth_dev *dev) hw->eth_stats_base.imissed = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); + + return 0; } -static void +static int nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct nfp_net_hw *hw; @@ -1171,7 +1223,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; - dev_info->min_rx_bufsize = ETHER_MIN_MTU; + dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU; dev_info->max_rx_pktlen = hw->max_mtu; /* Next should change when PF support is implemented */ dev_info->max_mac_addrs = 1; @@ -1233,6 +1285,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G; + + return 0; } static const uint32_t * @@ -1370,7 +1424,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev) if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { /* If MSI-X auto-masking is used, clear the entry */ rte_wmb(); - rte_intr_enable(&pci_dev->intr_handle); + rte_intr_ack(&pci_dev->intr_handle); } else { /* Make sure all updates are written before un-masking */ rte_wmb(); @@ -1444,7 +1498,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* check that mtu is within the allowed range */ - if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu)) + if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu) return -EINVAL; /* mtu setting is forbidden if port is started */ @@ -1455,7 +1509,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } /* switch to jumbo mode if needed */ - if ((uint32_t)mtu > ETHER_MAX_LEN) + if ((uint32_t)mtu > RTE_ETHER_MAX_LEN) dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; @@ -1509,7 +1563,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, if (rxq == NULL) return -ENOMEM; - /* Hw queues mapping based on firmware confifguration */ + /* Hw queues mapping based on firmware configuration */ rxq->qidx = queue_idx; rxq->fl_qcidx = queue_idx * hw->stride_rx; rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1); @@ -1541,7 +1595,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, socket_id); if (tz == NULL) { - PMD_DRV_LOG(ERR, "Error allocatig rx dma"); + PMD_DRV_LOG(ERR, "Error allocating rx dma"); nfp_net_rx_queue_release(rxq); return -ENOMEM; } @@ -1928,7 +1982,7 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq) /* * RX path design: * - * There are some decissions to take: + * There are some decisions to take: * 1) How to check DD RX descriptors bit * 2) How and when to allocate new mbufs * @@ -1998,7 +2052,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rte_rmb(); /* - * We got a packet. Let's alloc a new mbuff for refilling the + * We got a packet. 
Let's alloc a new mbuf for refilling the * free descriptor ring as soon as possible */ new_mb = rte_pktmbuf_alloc(rxq->mem_pool); @@ -2013,8 +2067,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) nb_hold++; /* - * Grab the mbuff and refill the descriptor with the - * previously allocated mbuff + * Grab the mbuf and refill the descriptor with the + * previously allocated mbuf */ mb = rxb->mbuf; rxb->mbuf = new_mb; @@ -2046,7 +2100,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) return -EINVAL; } - /* Filling the received mbuff with packet info */ + /* Filling the received mbuf with packet info */ if (hw->rx_offset) mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset; else @@ -2071,7 +2125,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; } - /* Adding the mbuff to the mbuff array passed by the app */ + /* Adding the mbuf to the mbuf array passed by the app */ rx_pkts[avail++] = mb; /* Now resetting and updating the descriptor */ @@ -2652,6 +2706,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = { .dev_configure = nfp_net_configure, .dev_start = nfp_net_start, .dev_stop = nfp_net_stop, + .dev_set_link_up = nfp_net_set_link_up, + .dev_set_link_down = nfp_net_set_link_down, .dev_close = nfp_net_close, .promiscuous_enable = nfp_net_promisc_enable, .promiscuous_disable = nfp_net_promisc_disable, @@ -2811,9 +2867,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) case PCI_DEVICE_ID_NFP6000_PF_NIC: case PCI_DEVICE_ID_NFP6000_VF_NIC: start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); - tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ; + tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); - rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ; + rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; break; default: PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); @@ -2861,7 +2917,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); - hw->mtu = ETHER_MTU; + hw->mtu = RTE_ETHER_MTU; /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) @@ -2904,7 +2960,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) rte_spinlock_init(&hw->reconfig_lock); /* Allocating memory for mac addr */ - eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", + RTE_ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to space for MAC address"); err = -ENOMEM; @@ -2918,16 +2975,17 @@ nfp_net_init(struct rte_eth_dev *eth_dev) nfp_net_vf_read_mac(hw); } - if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) { + if (!rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)&hw->mac_addr)) { PMD_INIT_LOG(INFO, "Using random mac address for port %d", port); /* Using random mac addresses for VFs */ - eth_random_addr(&hw->mac_addr[0]); + rte_eth_random_addr(&hw->mac_addr[0]); nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); } /* Copying mac address to DPDK eth_dev struct */ - ether_addr_copy((struct ether_addr *)hw->mac_addr, + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, ð_dev->data->mac_addrs[0]); if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) @@ -3257,6 +3315,7 @@ nfp_cpp_bridge_service_func(void *args) if (ret < 0) { 
RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n", __func__, errno); + close(sockfd); return ret; } @@ -3264,6 +3323,7 @@ nfp_cpp_bridge_service_func(void *args) if (ret < 0) { RTE_LOG(ERR, PMD, "%s: listen error(%d). Service failed\n", __func__, errno); + close(sockfd); return ret; } @@ -3273,6 +3333,7 @@ nfp_cpp_bridge_service_func(void *args) RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n", __func__, errno); RTE_LOG(ERR, PMD, "%s: service failed\n", __func__); + close(sockfd); return -EIO; } @@ -3321,9 +3382,9 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, return -ENOMEM; if (ports > 1) - sprintf(port_name, "%s_port%d", dev->device.name, port); + snprintf(port_name, 100, "%s_port%d", dev->device.name, port); else - sprintf(port_name, "%s", dev->device.name); + strlcat(port_name, dev->device.name, 100); if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -3436,28 +3497,31 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) /* Looking for firmware file in order of priority */ /* First try to find a firmware image specific for this device */ - sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", + snprintf(serial, sizeof(serial), + "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3], cpp->serial[4], cpp->serial[5], cpp->interface >> 8, cpp->interface & 0xff); - sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial); + snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, + serial); PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); fw_f = open(fw_name, O_RDONLY); - if (fw_f > 0) + if (fw_f >= 0) goto read_fw; /* Then try the PCI name */ - sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name); + snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, + dev->device.name); PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); fw_f = open(fw_name, O_RDONLY); - if (fw_f > 0) + if (fw_f >= 0) goto read_fw; /* Finally try the card type and media */ - sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card); + snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card); PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); fw_f = open(fw_name, O_RDONLY); if (fw_f < 0) { @@ -3533,8 +3597,9 @@ nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp, PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); - sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model, - nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000); + snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", + nfp_fw_model, nfp_eth_table->count, + nfp_eth_table->ports[0].speed / 1000); nsp = nfp_nsp_open(cpp); if (!nsp) { @@ -3707,16 +3772,14 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev) static struct rte_pci_driver rte_nfp_net_pf_pmd = { .id_table = pci_id_nfp_pf_net_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | - RTE_PCI_DRV_IOVA_AS_VA, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, .probe = nfp_pf_pci_probe, .remove = eth_nfp_pci_remove, }; static struct rte_pci_driver rte_nfp_net_vf_pmd = { .id_table = pci_id_nfp_vf_net_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | - RTE_PCI_DRV_IOVA_AS_VA, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, .probe = eth_nfp_pci_probe, .remove = eth_nfp_pci_remove, };
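
The hunks above convert several nfp eth_dev_ops callbacks from void to int and register the new dev_set_link_up/dev_set_link_down operations. Below is a minimal application-side sketch, not part of the patch, showing how those link callbacks get exercised through the generic ethdev API; the helper name, port id handling, log type and wording are assumptions for illustration only.

/*
 * Illustrative only: toggling the link on a port dispatches through
 * rte_eth_dev_set_link_up()/rte_eth_dev_set_link_down() to the
 * nfp_net_set_link_up()/nfp_net_set_link_down() callbacks added above,
 * which return -ENOTSUP on non-PF (VF) ports.
 */
#include <rte_ethdev.h>
#include <rte_log.h>

static int
toggle_port_link(uint16_t port_id, int up)
{
	int ret = up ? rte_eth_dev_set_link_up(port_id) :
		       rte_eth_dev_set_link_down(port_id);

	if (ret == -ENOTSUP) {
		/* NFP only supports link control on PF ports (hw->is_pf) */
		RTE_LOG(INFO, USER1, "port %u: link control not supported\n",
			port_id);
		return ret;
	}
	if (ret < 0)
		RTE_LOG(ERR, USER1, "port %u: link change failed: %d\n",
			port_id, ret);
	return ret;
}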