X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=a76dbd48fa80d87b5ae8b351fe3a03b9379078da;hb=f78f0e64f772aa32bdca79a17835db8f2c57c1d9;hp=9d9420d57ccff3da6d277ec3ab0eef34e1acc676;hpb=2dfcada3eba1c1f5ddb275a48dd044476ac53825;p=dpdk.git diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c index 9d9420d57c..a76dbd48fa 100644 --- a/drivers/net/nfp/nfp_net.c +++ b/drivers/net/nfp/nfp_net.c @@ -39,13 +39,12 @@ * Netronome vNIC DPDK Poll-Mode Driver: Main entry point */ -#include - #include #include #include #include #include +#include #include #include #include @@ -63,8 +62,7 @@ /* Prototypes */ static void nfp_net_close(struct rte_eth_dev *dev); static int nfp_net_configure(struct rte_eth_dev *dev); -static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle, - void *param); +static void nfp_net_dev_interrupt_handler(void *param); static void nfp_net_dev_interrupt_delayed_handler(void *param); static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static void nfp_net_infos_get(struct rte_eth_dev *dev, @@ -205,26 +203,6 @@ nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val) nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off); } -/* Creating memzone for hardware rings. */ -static const struct rte_memzone * -ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, - uint16_t queue_id, uint32_t ring_size, int socket_id) -{ - char z_name[RTE_MEMZONE_NAMESIZE]; - const struct rte_memzone *mz; - - snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - dev->driver->pci_drv.driver.name, - ring_name, dev->data->port_id, queue_id); - - mz = rte_memzone_lookup(z_name); - if (mz) - return mz; - - return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, - NFP_MEMZONE_ALIGN); -} - /* * Atomically reads link status information from global structure rte_eth_dev. 
* @@ -625,6 +603,20 @@ static void nfp_net_read_mac(struct nfp_net_hw *hw) memcpy(&hw->mac_addr[4], &tmp, 2); } +static void +nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac) +{ + uint32_t mac0 = *(uint32_t *)mac; + uint16_t mac1; + + nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR); + + mac += 4; + mac1 = *(uint16_t *)mac; + nn_writew(rte_cpu_to_be_16(mac1), + hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6); +} + static int nfp_configure_rx_interrupt(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle) @@ -649,14 +641,19 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev, PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO"); /* UIO just supports one queue and no LSC*/ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0); + intr_handle->intr_vec[0] = 0; } else { PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO"); - for (i = 0; i < dev->data->nb_rx_queues; i++) + for (i = 0; i < dev->data->nb_rx_queues; i++) { /* * The first msix vector is reserved for non * efd interrupts */ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1); + intr_handle->intr_vec[i] = i + 1; + PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i, + intr_handle->intr_vec[i]); + } } /* Avoiding TX interrupts */ @@ -667,7 +664,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev, static int nfp_net_start(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; @@ -706,19 +703,17 @@ nfp_net_start(struct rte_eth_dev *dev) intr_vector = dev->data->nb_rx_queues; if (rte_intr_efd_enable(intr_handle, intr_vector)) return -1; - } - nfp_configure_rx_interrupt(dev, intr_handle); + nfp_configure_rx_interrupt(dev, intr_handle); + update = NFP_NET_CFG_UPDATE_MSIX; + } rte_intr_enable(intr_handle); /* Enable device */ new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE; - update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; - /* Just configuring queues interrupts when necessary */ - if (rte_intr_dp_is_en(intr_handle)) - update |= NFP_NET_CFG_UPDATE_MSIX; + update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; @@ -791,7 +786,7 @@ nfp_net_close(struct rte_eth_dev *dev) PMD_INIT_LOG(DEBUG, "Close"); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = RTE_DEV_TO_PCI(dev->device); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); /* * We assume that the DPDK application is stopping all the @@ -1100,7 +1095,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); + dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; dev_info->min_rx_bufsize = ETHER_MIN_MTU; @@ -1192,7 +1187,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx) * Other PMDs are just checking the DD bit in intervals of 4 * descriptors and counting all four if the first has the DD * bit on. Of course, this is not accurate but can be good for - * perfomance. But ideally that should be done in descriptors + * performance. 
But ideally that should be done in descriptors * chunks belonging to the same cache line */ @@ -1220,7 +1215,7 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) int base = 0; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = RTE_DEV_TO_PCI(dev->device); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO) base = 1; @@ -1240,7 +1235,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) int base = 0; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = RTE_DEV_TO_PCI(dev->device); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO) base = 1; @@ -1254,7 +1249,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) static void nfp_net_dev_link_status_print(struct rte_eth_dev *dev) { - struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; memset(&link, 0, sizeof(link)); @@ -1288,7 +1283,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = RTE_DEV_TO_PCI(dev->device); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { /* If MSI-X auto-masking is used, clear the entry */ @@ -1303,8 +1298,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev) } static void -nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, - void *param) +nfp_net_dev_interrupt_handler(void *param) { int64_t timeout; struct rte_eth_link link; @@ -1354,7 +1348,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param) struct rte_eth_dev *dev = (struct rte_eth_dev *)param; nfp_net_link_update(dev, 0); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL); nfp_net_dev_link_status_print(dev); @@ -1456,9 +1450,10 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. */ - tz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, + tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, sizeof(struct nfp_net_rx_desc) * - NFP_NET_MAX_RX_DESC, socket_id); + NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN, + socket_id); if (tz == NULL) { RTE_LOG(ERR, PMD, "Error allocatig rx dma\n"); @@ -1492,7 +1487,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, * of descriptors in log2 format */ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma); - nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc)); + nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc)); return 0; } @@ -1598,9 +1593,10 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. 
*/ - tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, sizeof(struct nfp_net_tx_desc) * - NFP_NET_MAX_TX_DESC, socket_id); + NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN, + socket_id); if (tz == NULL) { RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); nfp_net_tx_queue_release(txq); @@ -1646,7 +1642,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, * of descriptors in log2 format */ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma); - nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc)); + nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc)); return 0; } @@ -2097,6 +2093,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) * Checksum and VLAN flags just in the first descriptor for a * multisegment packet, but TSO info needs to be in all of them. */ + txd.data_len = pkt->pkt_len; nfp_net_tx_tso(txq, &txd, pkt); nfp_net_tx_cksum(txq, &txd, pkt); @@ -2113,18 +2110,20 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) */ pkt_size = pkt->pkt_len; - /* Releasing mbuf which was prefetched above */ - if (*lmbuf) - rte_pktmbuf_free(*lmbuf); - /* - * Linking mbuf with descriptor for being released - * next time descriptor is used - */ - *lmbuf = pkt; - - while (pkt_size) { + while (pkt) { /* Copying TSO, VLAN and cksum info */ *txds = txd; + + /* Releasing mbuf used by this descriptor previously*/ + if (*lmbuf) + rte_pktmbuf_free_seg(*lmbuf); + + /* + * Linking mbuf with descriptor for being released + * next time descriptor is used + */ + *lmbuf = pkt; + dma_size = pkt->data_len; dma_addr = rte_mbuf_data_dma_addr(pkt); PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:" @@ -2132,7 +2131,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Filling descriptors fields */ txds->dma_len = dma_size; - txds->data_len = pkt->pkt_len; + txds->data_len = txd.data_len; txds->dma_addr_hi = (dma_addr >> 32) & 0xff; txds->dma_addr_lo = (dma_addr & 0xffffffff); ASSERT(free_descs > 0); @@ -2143,15 +2142,16 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) txq->wr_p = 0; pkt_size -= dma_size; - if (!pkt_size) { + if (!pkt_size) /* End of packet */ txds->offset_eop |= PCIE_DESC_TX_EOP; - } else { + else txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK; - pkt = pkt->next; - } + + pkt = pkt->next; /* Referencing next free TX descriptor */ txds = &txq->txds[txq->wr_p]; + lmbuf = &txq->txbufs[txq->wr_p].mbuf; issued_descs++; } i++; @@ -2249,7 +2249,8 @@ nfp_net_reta_update(struct rte_eth_dev *dev, reta &= ~(0xFF << (8 * j)); reta |= reta_conf[idx].reta[shift + j] << (8 * j); } - nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta); + nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, + reta); } update = NFP_NET_CFG_UPDATE_RSS; @@ -2296,7 +2297,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev, if (!mask) continue; - reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift); + reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + + shift); for (j = 0; j < 4; j++) { if (!(mask & (0x1 << j))) continue; @@ -2346,6 +2348,9 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, NFP_NET_CFG_RSS_IPV6_TCP | NFP_NET_CFG_RSS_IPV6_UDP; + cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK; + cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ; + /* configuring where to apply the RSS hash */ nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl); @@ -2464,7 +2469,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) 
if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); rte_eth_copy_pci_info(eth_dev, pci_dev); eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; @@ -2555,9 +2560,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev) nfp_net_read_mac(hw); - if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) + if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) { /* Using random mac addresses for VFs */ eth_random_addr(&hw->mac_addr[0]); + nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); + } /* Copying mac address to DPDK eth_dev struct */ ether_addr_copy((struct ether_addr *)hw->mac_addr, @@ -2598,20 +2605,28 @@ static const struct rte_pci_id pci_id_nfp_net_map[] = { }, }; -static struct eth_driver rte_nfp_net_pmd = { - .pci_drv = { - .id_table = pci_id_nfp_net_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, - .probe = rte_eth_dev_pci_probe, - .remove = rte_eth_dev_pci_remove, - }, - .eth_dev_init = nfp_net_init, - .dev_private_size = sizeof(struct nfp_net_adapter), +static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct nfp_net_adapter), nfp_net_init); +} + +static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, NULL); +} + +static struct rte_pci_driver rte_nfp_net_pmd = { + .id_table = pci_id_nfp_net_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_nfp_pci_probe, + .remove = eth_nfp_pci_remove, }; -RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv); +RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map); -RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio"); +RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio-pci"); /* * Local variables:
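
The prototype change to nfp_net_dev_interrupt_handler() tracks the EAL move to single-argument interrupt callbacks: the rte_intr_handle is no longer passed to the callback, only the opaque pointer supplied at registration time. A minimal sketch of the pattern, assuming the usual registration against the PCI device's intr_handle; the example_* names below are illustrative, not part of the driver:

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ethdev.h>

/* New-style callback: only the opaque argument given at registration is
 * passed in; the rte_intr_handle is no longer part of the prototype. */
static void
example_lsc_handler(void *param)
{
        struct rte_eth_dev *dev = param;

        /* If the handler needs the interrupt handle, it now has to reach
         * it through the device (e.g. its rte_pci_device) instead. */
        RTE_SET_USED(dev);
}

/* Registration itself is unchanged; only the callback type differs. */
static int
example_register(struct rte_pci_device *pci_dev, struct rte_eth_dev *eth_dev)
{
        return rte_intr_callback_register(&pci_dev->intr_handle,
                                          example_lsc_handler,
                                          (void *)eth_dev);
}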
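
The private ring_dma_zone_reserve() helper is dropped in favour of the ethdev rte_eth_dma_zone_reserve() helper, which covers the same lookup-or-reserve logic and additionally takes the alignment the old helper hard-coded. A sketch of the equivalence, assuming the driver's NFP_MEMZONE_ALIGN is in scope; example_ring_zone() is an illustrative name:

#include <rte_ethdev.h>

/* What the removed ring_dma_zone_reserve() boils down to now: the ethdev
 * helper builds the per-port/per-queue zone name, returns an already
 * reserved zone when one exists, and otherwise reserves 'size' bytes with
 * the requested alignment on 'socket_id'. */
static const struct rte_memzone *
example_ring_zone(struct rte_eth_dev *dev, const char *ring_name,
                  uint16_t queue_idx, size_t size, int socket_id)
{
        return rte_eth_dma_zone_reserve(dev, ring_name, queue_idx, size,
                                        NFP_MEMZONE_ALIGN, socket_id);
}

The queue setup paths call it directly with sizeof(descriptor) * NFP_NET_MAX_RX_DESC / NFP_NET_MAX_TX_DESC, so the maximum-size zone is reserved once and later setup calls with a smaller ring count can reuse it.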
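
For Rx interrupts, the vNIC vector configuration now also fills intr_handle->intr_vec[]: with UIO there is a single vector (0) and no LSC, while with VFIO vector 0 stays reserved for non-EFD (link state/exception) interrupts, so Rx queue i maps to MSI-X entry i + 1, and NFP_NET_CFG_UPDATE_MSIX is only requested when Rx interrupts were actually asked for. A condensed sketch of that mapping as it would sit in this file (driver-internal helpers such as nn_cfg_writeb() assumed in scope; example_map_rx_vectors() is an illustrative name):

/* Mirror the firmware vector programming into the EAL intr_vec[] table. */
static void
example_map_rx_vectors(struct rte_eth_dev *dev,
                       struct rte_intr_handle *intr_handle,
                       struct nfp_net_hw *hw)
{
        uint16_t i;

        if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
                /* UIO: one vector only, shared, no link-state interrupt */
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
                intr_handle->intr_vec[0] = 0;
                return;
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                /* entry 0 is kept for non-EFD interrupts */
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
                intr_handle->intr_vec[i] = i + 1;
        }
}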
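
The transmit path is reworked to walk every mbuf segment: the descriptor metadata (including data_len, which now carries the full packet length taken once from pkt->pkt_len) is copied into each segment's descriptor, dma_len holds the per-segment length, EOP is set only on the last segment, and the mbuf previously attached to each ring slot is released with rte_pktmbuf_free_seg() before the slot is reused. A reduced sketch of that per-segment fill, assuming the driver's internal types and flags are in scope; example_fill_segments() is illustrative and leaves out ring wrap-around, free-descriptor accounting and the final queue kick:

#include <rte_mbuf.h>

/* 'txd' already carries the TSO/checksum/VLAN metadata and, after this
 * change, the whole packet length in data_len. */
static void
example_fill_segments(struct nfp_net_txq *txq, struct nfp_net_tx_desc txd,
                      struct rte_mbuf *pkt)
{
        struct rte_mbuf *seg;

        for (seg = pkt; seg != NULL; seg = seg->next) {
                struct nfp_net_tx_desc *txds = &txq->txds[txq->wr_p];
                struct rte_mbuf **lmbuf = &txq->txbufs[txq->wr_p].mbuf;
                uint64_t dma_addr = rte_mbuf_data_dma_addr(seg);

                *txds = txd;                    /* metadata in every segment */

                /* free whatever mbuf this slot pointed to from an earlier
                 * transmission, then park the current segment there */
                if (*lmbuf)
                        rte_pktmbuf_free_seg(*lmbuf);
                *lmbuf = seg;

                txds->dma_len = seg->data_len;  /* this segment only */
                txds->data_len = txd.data_len;  /* whole packet, every desc */
                txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
                txds->dma_addr_lo = dma_addr & 0xffffffff;

                if (seg->next == NULL)          /* last segment of the packet */
                        txds->offset_eop |= PCIE_DESC_TX_EOP;
                else
                        txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;

                txq->wr_p++;                    /* wrap handling omitted */
        }
}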
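
The RETA update/query fix adds the missing group offset for tables spanning more than one RTE_RETA_GROUP_SIZE (64) group: each group of one-byte entries occupies 64 bytes of the NFP_NET_CFG_RSS_ITBL area and every 32-bit config word packs four entries. A small sketch of the resulting offset arithmetic, with example_itbl_word_off() as an illustrative name and NFP_NET_CFG_RSS_ITBL taken from the driver's control-BAR layout:

#include <stdint.h>
#include <rte_ethdev.h>         /* RTE_RETA_GROUP_SIZE */

/* Byte offset of the 32-bit ITBL word holding redirection entry 'n',
 * matching the (idx * 64) + shift arithmetic added to reta update/query. */
static inline uint32_t
example_itbl_word_off(uint32_t n)
{
        uint32_t idx = n / RTE_RETA_GROUP_SIZE;         /* 64-entry group   */
        uint32_t shift = n % RTE_RETA_GROUP_SIZE;       /* entry inside it  */

        return NFP_NET_CFG_RSS_ITBL + (idx * RTE_RETA_GROUP_SIZE) +
               (shift & ~0x3u);                         /* 4 entries/word   */
}

For entry 200, for instance, this gives idx = 3 and shift = 8, i.e. NFP_NET_CFG_RSS_ITBL + 192 + 8, whereas the pre-fix code ignored the group and wrote all groups on top of the first 64 bytes.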