diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 92ae48a060..869c55ce28 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -54,6 +54,7 @@
 #include
 #include
 #include
+#include <rte_spinlock.h>
 
 #include "nfp_net_pmd.h"
 #include "nfp_net_logs.h"
@@ -213,7 +214,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 	const struct rte_memzone *mz;
 
 	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		 dev->driver->pci_drv.name,
+		 dev->driver->pci_drv.driver.name,
		 ring_name, dev->data->port_id, queue_id);
 
 	mz = rte_memzone_lookup(z_name);
@@ -322,7 +323,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 
 	for (i = 0; i < txq->tx_count; i++) {
 		if (txq->txbufs[i].mbuf) {
-			rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
+			rte_pktmbuf_free(txq->txbufs[i].mbuf);
 			txq->txbufs[i].mbuf = NULL;
 		}
 	}
@@ -407,6 +408,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
 	PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
 		    ctrl, update);
 
+	rte_spinlock_lock(&hw->reconfig_lock);
+
 	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
 
@@ -414,6 +417,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
 
 	err = __nfp_net_reconfig(hw, update);
 
+	rte_spinlock_unlock(&hw->reconfig_lock);
+
 	if (!err)
 		return 0;
 
@@ -602,18 +607,8 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
 static void
 nfp_net_params_setup(struct nfp_net_hw *hw)
 {
-	uint32_t *mac_address;
-
 	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
 	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
-
-	/* A MAC address is 8 bytes long */
-	mac_address = (uint32_t *)(hw->mac_addr);
-
-	nn_cfg_writel(hw, NFP_NET_CFG_MACADDR,
-		      rte_cpu_to_be_32(*mac_address));
-	nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4,
-		      rte_cpu_to_be_32(*(mac_address + 4)));
 }
 
 static void
@@ -622,6 +617,17 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
 	hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 }
 
+static void nfp_net_read_mac(struct nfp_net_hw *hw)
+{
+	uint32_t tmp;
+
+	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
+	memcpy(&hw->mac_addr[0], &tmp, 4);
+
+	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
+	memcpy(&hw->mac_addr[4], &tmp, 2);
+}
+
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
@@ -712,10 +718,12 @@ static void
 nfp_net_close(struct rte_eth_dev *dev)
 {
 	struct nfp_net_hw *hw;
+	struct rte_pci_device *pci_dev;
 
 	PMD_INIT_LOG(DEBUG, "Close\n");
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	pci_dev = RTE_DEV_TO_PCI(dev->device);
 
 	/*
 	 * We assume that the DPDK application is stopping all the
@@ -724,9 +732,14 @@ nfp_net_close(struct rte_eth_dev *dev)
 
 	nfp_net_stop(dev);
 
-	rte_intr_disable(&dev->pci_dev->intr_handle);
+	rte_intr_disable(&pci_dev->intr_handle);
 	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
 
+	/* unregister callback func from eal lib */
+	rte_intr_callback_unregister(&pci_dev->intr_handle,
+				     nfp_net_dev_interrupt_handler,
+				     (void *)dev);
+
 	/*
	 * The ixgbe PMD driver disables the pcie master on the
	 * device. The i40e does not...
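Note: the two nfp_net_reconfig() hunks above bracket the CTRL/UPDATE BAR
writes and the polling done inside __nfp_net_reconfig() with the new
per-device spinlock, so concurrent reconfigurations cannot interleave.
A minimal sketch of the resulting pattern, assuming the reconfig_lock
field this patch adds to struct nfp_net_hw; the helper name
nfp_net_reconfig_locked is illustrative, not part of the patch:

/*
 * Illustrative only: the locking pattern nfp_net_reconfig() follows
 * after this change. The CTRL/UPDATE writes and the completion polling
 * must execute atomically with respect to other reconfig callers.
 */
static int
nfp_net_reconfig_locked(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
	int err;

	rte_spinlock_lock(&hw->reconfig_lock);

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
	rte_wmb();

	err = __nfp_net_reconfig(hw, update);

	rte_spinlock_unlock(&hw->reconfig_lock);

	return err;
}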
@@ -805,6 +818,17 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	struct rte_eth_link link, old;
 	uint32_t nn_link_status;
 
+	static const uint32_t ls_to_ethtool[] = {
+		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
+		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
+		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
+		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
+		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
+		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
+		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+	};
+
 	PMD_DRV_LOG(DEBUG, "Link update\n");
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -817,11 +841,24 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = 1;
+		link.link_status = ETH_LINK_UP;
 
 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	/* Other cards can limit the tx and rx rate per VF */
-	link.link_speed = ETH_LINK_SPEED_40G;
+
+	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
+			 NFP_NET_CFG_STS_LINK_RATE_MASK;
+
+	if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
+	    ((NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4) &&
+	     (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0)))
+		/* We really do not know the speed with old firmware */
+		link.link_speed = ETH_SPEED_NUM_NONE;
+	else {
+		if (nn_link_status >= RTE_DIM(ls_to_ethtool))
+			link.link_speed = ETH_SPEED_NUM_NONE;
+		else
+			link.link_speed = ls_to_ethtool[nn_link_status];
+	}
 
 	if (old.link_status != link.link_status) {
 		nfp_net_dev_atomic_write_link_status(dev, &link);
@@ -902,11 +939,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
 
-	nfp_dev_stats.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
-	nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
 	/* reading general device stats */
 	nfp_dev_stats.ierrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -918,12 +950,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
 
-	/* Multicast frames received */
-	nfp_dev_stats.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
-	nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
 	/* RX ring mbuf allocation failures */
 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 
@@ -985,9 +1011,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 	hw->eth_stats_base.obytes =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
 
-	hw->eth_stats_base.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
 	/* reading general device stats */
 	hw->eth_stats_base.ierrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -995,10 +1018,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 	hw->eth_stats_base.oerrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
 
-	/* Multicast frames received */
-	hw->eth_stats_base.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
 	/* RX ring mbuf allocation failures */
 	dev->data->rx_mbuf_alloc_failed = 0;
 
@@ -1013,7 +1032,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	dev_info->driver_name = dev->driver->pci_drv.name;
+	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
 	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1034,8 +1053,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-					     DEV_RX_OFFLOAD_UDP_CKSUM |
-					     DEV_RX_OFFLOAD_TCP_CKSUM;
+					     DEV_TX_OFFLOAD_UDP_CKSUM |
+					     DEV_TX_OFFLOAD_TCP_CKSUM;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1061,6 +1080,10 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 	dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
+
+	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
 }
 
 static const uint32_t *
@@ -1127,6 +1150,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 static void
 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 {
+	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
 	struct rte_eth_link link;
 
 	memset(&link, 0, sizeof(link));
@@ -1141,8 +1165,8 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 		    (int)(dev->data->port_id));
 
 	RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
-		dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
-		dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+		pci_dev->addr.domain, pci_dev->addr.bus,
+		pci_dev->addr.devid, pci_dev->addr.function);
 }
 
 /* Interrupt configuration and handling */
@@ -1157,13 +1181,15 @@ static void
 nfp_net_irq_unmask(struct rte_eth_dev *dev)
 {
 	struct nfp_net_hw *hw;
+	struct rte_pci_device *pci_dev;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	pci_dev = RTE_DEV_TO_PCI(dev->device);
 
 	if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
 		/* If MSI-X auto-masking is used, clear the entry */
 		rte_wmb();
-		rte_intr_enable(&dev->pci_dev->intr_handle);
+		rte_intr_enable(&pci_dev->intr_handle);
 	} else {
 		/* Make sure all updates are written before un-masking */
 		rte_wmb();
@@ -1224,7 +1250,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
 	nfp_net_link_update(dev, 0);
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 
 	nfp_net_dev_link_status_print(dev);
 
@@ -1609,12 +1635,6 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
 	hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
 	hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
 
-	/*
-	 * hash type is sharing the same word with input port info
-	 * 31-8: input port
-	 * 7:0: hash type
-	 */
-	hash_type &= 0xff;
 	mbuf->hash.rss = hash;
 	mbuf->ol_flags |= PKT_RX_RSS_HASH;
 
@@ -1633,29 +1653,6 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
 	}
 }
 
-/* nfp_net_check_port - Set mbuf in_port field */
-static void
-nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf)
-{
-	uint32_t port;
-
-	if (!(rxd->rxd.flags & PCIE_DESC_RX_INGRESS_PORT)) {
-		mbuf->port = 0;
-		return;
-	}
-
-	port = rte_be_to_cpu_32(*(uint32_t *)((uint8_t *)mbuf->buf_addr +
-					      mbuf->data_off - 8));
-
-	/*
-	 * hash type is sharing the same word with input port info
-	 * 31-8: input port
-	 * 7:0: hash type
-	 */
-	port = (uint8_t)(port >> 8);
-	mbuf->port = port;
-}
-
 static inline void
 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
 {
@@ -1712,7 +1709,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	 * DPDK just checks the queue is lower than max queues
 	 * enabled. But the queue needs to be configured
 	 */
-		RTE_LOG(ERR, PMD, "RX Bad queue\n");
+		RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
 		return -EINVAL;
 	}
@@ -1725,7 +1722,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rxb = &rxq->rxbufs[idx];
 		if (unlikely(rxb == NULL)) {
-			RTE_LOG(ERR, PMD, "rxb does not exist!\n");
+			RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
 			break;
 		}
 
@@ -1745,7 +1742,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		 */
 		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
 		if (unlikely(new_mb == NULL)) {
-			RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
+			RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
 				"queue_id=%u\n", (unsigned)rxq->port_id,
 				(unsigned)rxq->qidx);
 			nfp_net_mbuf_alloc_failed(rxq);
@@ -1776,7 +1773,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			 * responsibility of avoiding it. But we have
 			 * to give some info about the error
 			 */
-			RTE_LOG(ERR, PMD,
+			RTE_LOG_DP(ERR, PMD,
 				"mbuf overflow likely due to the RX offset.\n"
 				"\t\tYour mbuf size should have extra space for"
 				" RX offset=%u bytes.\n"
@@ -1805,13 +1802,10 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		/* Checking the checksum flag */
 		nfp_net_rx_cksum(rxq, rxds, mb);
 
-		/* Checking the port flag */
-		nfp_net_check_port(rxds, mb);
-
 		if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
 		    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
 			mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-			mb->ol_flags |= PKT_RX_VLAN_PKT;
+			mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
 		}
 
 		/* Adding the mbuf to the mbuf array passed by the app */
@@ -1987,11 +1981,16 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 */
 		pkt_size = pkt->pkt_len;
 
-		while (pkt_size) {
-			/* Releasing mbuf which was prefetched above */
-			if (*lmbuf)
-				rte_pktmbuf_free_seg(*lmbuf);
+		/* Releasing mbuf which was prefetched above */
+		if (*lmbuf)
+			rte_pktmbuf_free(*lmbuf);
+		/*
+		 * Linking mbuf with descriptor for being released
+		 * next time descriptor is used
+		 */
+		*lmbuf = pkt;
+		while (pkt_size) {
 			dma_size = pkt->data_len;
 			dma_addr = rte_mbuf_data_dma_addr(pkt);
 			PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -2005,12 +2004,6 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			ASSERT(free_descs > 0);
 			free_descs--;
 
-			/*
-			 * Linking mbuf with descriptor for being released
-			 * next time descriptor is used
-			 */
-			*lmbuf = pkt;
-
 			txq->wr_p++;
 			txq->tail++;
 			if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
@@ -2336,7 +2329,10 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
-	pci_dev = eth_dev->pci_dev;
+	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
@@ -2404,7 +2400,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 		     hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
 		     hw->cap & NFP_NET_CFG_CTRL_RSS ?
"RSS " : ""); - pci_dev = eth_dev->pci_dev; hw->ctrl = 0; hw->stride_rx = stride; @@ -2413,6 +2408,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n", hw->max_rx_queues, hw->max_tx_queues); + /* Initializing spinlock for reconfigs */ + rte_spinlock_init(&hw->reconfig_lock); + /* Allocating memory for mac addr */ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { @@ -2420,12 +2418,15 @@ nfp_net_init(struct rte_eth_dev *eth_dev) return -ENOMEM; } - /* Using random mac addresses for VFs */ - eth_random_addr(&hw->mac_addr[0]); + nfp_net_read_mac(hw); + + if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) + /* Using random mac addresses for VFs */ + eth_random_addr(&hw->mac_addr[0]); /* Copying mac address to DPDK eth_dev struct */ - ether_addr_copy(ð_dev->data->mac_addrs[0], - (struct ether_addr *)hw->mac_addr); + ether_addr_copy((struct ether_addr *)hw->mac_addr, + ð_dev->data->mac_addrs[0]); PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " "mac=%02x:%02x:%02x:%02x:%02x:%02x", @@ -2453,16 +2454,12 @@ nfp_net_init(struct rte_eth_dev *eth_dev) static struct rte_pci_id pci_id_nfp_net_map[] = { { - .vendor_id = PCI_VENDOR_ID_NETRONOME, - .device_id = PCI_DEVICE_ID_NFP6000_PF_NIC, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID, + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, + PCI_DEVICE_ID_NFP6000_PF_NIC) }, { - .vendor_id = PCI_VENDOR_ID_NETRONOME, - .device_id = PCI_DEVICE_ID_NFP6000_VF_NIC, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID, + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, + PCI_DEVICE_ID_NFP6000_VF_NIC) }, { .vendor_id = 0, @@ -2470,33 +2467,19 @@ static struct rte_pci_id pci_id_nfp_net_map[] = { }; static struct eth_driver rte_nfp_net_pmd = { - { - .name = "rte_nfp_net_pmd", + .pci_drv = { .id_table = pci_id_nfp_net_map, .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = rte_eth_dev_pci_probe, + .remove = rte_eth_dev_pci_remove, }, .eth_dev_init = nfp_net_init, .dev_private_size = sizeof(struct nfp_net_adapter), }; -static int -nfp_net_pmd_init(const char *name __rte_unused, - const char *params __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n", - NFP_NET_PMD_VERSION); - - rte_eth_driver_register(&rte_nfp_net_pmd); - return 0; -} - -static struct rte_driver rte_nfp_net_driver = { - .type = PMD_PDEV, - .init = nfp_net_pmd_init, -}; - -PMD_REGISTER_DRIVER(rte_nfp_net_driver); +RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv); +RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map); +RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio"); /* * Local variables: