X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=5c9f350238c71a0e7c32eeb7e4ed94fa38051532;hb=9c24780f0d5efd09fc238a5ad4a36a13fee0a792;hp=f12288c112ccc469c11fc058116f1d1a8a87d666;hpb=79c4319c1a5d66c656b0997d7ea8fc3a42d91cae;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index f12288c112..5c9f350238 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -54,6 +54,7 @@
 #include
 #include
 #include
+#include <rte_spinlock.h>
 
 #include "nfp_net_pmd.h"
 #include "nfp_net_logs.h"
@@ -322,7 +323,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 	for (i = 0; i < txq->tx_count; i++) {
 		if (txq->txbufs[i].mbuf) {
-			rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
+			rte_pktmbuf_free(txq->txbufs[i].mbuf);
 			txq->txbufs[i].mbuf = NULL;
 		}
 	}
 }
@@ -407,6 +408,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
 	PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
 		    ctrl, update);
 
+	rte_spinlock_lock(&hw->reconfig_lock);
+
 	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
 
@@ -414,6 +417,8 @@
 
 	err = __nfp_net_reconfig(hw, update);
 
+	rte_spinlock_unlock(&hw->reconfig_lock);
+
 	if (!err)
 		return 0;
 
@@ -817,11 +822,11 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = 1;
+		link.link_status = ETH_LINK_UP;
 
 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
 	/* Other cards can limit the tx and rx rate per VF */
-	link.link_speed = ETH_LINK_SPEED_40G;
+	link.link_speed = ETH_SPEED_NUM_40G;
 
 	if (old.link_status != link.link_status) {
 		nfp_net_dev_atomic_write_link_status(dev, &link);
@@ -902,11 +907,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
 
-	nfp_dev_stats.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
-	nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
 	/* reading general device stats */
 	nfp_dev_stats.ierrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
 
@@ -918,12 +918,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
 
-	/* Multicast frames received */
-	nfp_dev_stats.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
-	nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
 	/* RX ring mbuf allocation failures */
 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 
@@ -985,9 +979,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 	hw->eth_stats_base.obytes =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
 
-	hw->eth_stats_base.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
 	/* reading general device stats */
 	hw->eth_stats_base.ierrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -995,10 +986,6 @@
 	hw->eth_stats_base.oerrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
 
-	/* Multicast frames received */
-	hw->eth_stats_base.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
 	/* RX ring mbuf allocation failures */
 	dev->data->rx_mbuf_alloc_failed = 0;
 
@@ -1061,6 +1048,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 	dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
+
+	dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
 }
 
 static const uint32_t *
@@ -1811,7 +1800,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
 		    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
 			mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-			mb->ol_flags |= PKT_RX_VLAN_PKT;
+			mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
 		}
 
 		/* Adding the mbuff to the mbuff array passed by the app */
@@ -1987,11 +1976,16 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 */
 		pkt_size = pkt->pkt_len;
 
-		while (pkt_size) {
-			/* Releasing mbuf which was prefetched above */
-			if (*lmbuf)
-				rte_pktmbuf_free_seg(*lmbuf);
+		/* Releasing mbuf which was prefetched above */
+		if (*lmbuf)
+			rte_pktmbuf_free(*lmbuf);
+		/*
+		 * Linking mbuf with descriptor for being released
+		 * next time descriptor is used
+		 */
+		*lmbuf = pkt;
 
+		while (pkt_size) {
 			dma_size = pkt->data_len;
 			dma_addr = rte_mbuf_data_dma_addr(pkt);
 			PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -2005,12 +1999,6 @@
 			ASSERT(free_descs > 0);
 			free_descs--;
 
-			/*
-			 * Linking mbuf with descriptor for being released
-			 * next time descriptor is used
-			 */
-			*lmbuf = pkt;
-
 			txq->wr_p++;
 			txq->tail++;
 			if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
@@ -2415,6 +2403,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
 		     hw->max_rx_queues, hw->max_tx_queues);
 
+	/* Initializing spinlock for reconfigs */
+	rte_spinlock_init(&hw->reconfig_lock);
+
 	/* Allocating memory for mac addr */
 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
@@ -2475,7 +2466,8 @@ static struct eth_driver rte_nfp_net_pmd = {
 	{
 		.name = "rte_nfp_net_pmd",
 		.id_table = pci_id_nfp_net_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+			     RTE_PCI_DRV_DETACHABLE,
	},
 	.eth_dev_init = nfp_net_init,
 	.dev_private_size = sizeof(struct nfp_net_adapter),
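
Note on the reconfig hunks above: the patch wraps the control-BAR reconfiguration cycle (write the CTRL and UPDATE words, then wait for the firmware acknowledgement in __nfp_net_reconfig()) in a spinlock that is initialized once in nfp_net_init(), so that two reconfiguration requests cannot interleave their BAR writes. The fragment below is a minimal sketch of that locking pattern using DPDK's rte_spinlock API; the demo_hw structure and the cfg_writel()/wait_fw_ack() helpers are hypothetical stand-ins for illustration, not the driver's real nn_cfg_writel()/__nfp_net_reconfig().

/*
 * Hypothetical sketch only: the struct layout and the helpers below are
 * simplified stand-ins, not the NFP driver's actual definitions.
 */
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_spinlock.h>

struct demo_hw {
	rte_spinlock_t reconfig_lock;	/* serializes control-BAR reconfig cycles */
	uint32_t ctrl_bar[64];		/* stand-in for the mapped control BAR */
};

/* Stand-in for nn_cfg_writel(): write one config word into the BAR. */
static void
cfg_writel(struct demo_hw *hw, unsigned int off, uint32_t val)
{
	hw->ctrl_bar[off] = val;
}

/* Stand-in for __nfp_net_reconfig(): pretend firmware acked the update. */
static int
wait_fw_ack(struct demo_hw *hw __rte_unused, uint32_t update __rte_unused)
{
	return 0;
}

/* Set up the lock once, as the patch does in nfp_net_init(). */
static void
demo_hw_init(struct demo_hw *hw)
{
	rte_spinlock_init(&hw->reconfig_lock);
}

/* Reconfig under the lock, mirroring the shape of the patched function. */
static int
demo_reconfig(struct demo_hw *hw, uint32_t ctrl, uint32_t update)
{
	int err;

	rte_spinlock_lock(&hw->reconfig_lock);

	cfg_writel(hw, 0 /* CTRL word */, ctrl);
	cfg_writel(hw, 1 /* UPDATE word */, update);
	rte_wmb();			/* order the BAR writes before polling */

	err = wait_fw_ack(hw, update);

	rte_spinlock_unlock(&hw->reconfig_lock);

	return err;
}

A busy-wait spinlock is a reasonable fit for this kind of critical section: it is short (a pair of register writes plus a bounded poll), and the lock only has to keep concurrent reconfiguration requests from interleaving.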