diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 43e9c2a6fb..5c5cba1973 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -63,8 +64,7 @@
 /* Prototypes */
 static void nfp_net_close(struct rte_eth_dev *dev);
 static int nfp_net_configure(struct rte_eth_dev *dev);
-static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle,
-                                          void *param);
+static void nfp_net_dev_interrupt_handler(void *param);
 static void nfp_net_dev_interrupt_delayed_handler(void *param);
 static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static void nfp_net_infos_get(struct rte_eth_dev *dev,
@@ -205,26 +205,6 @@ nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
         nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
 }
 
-/* Creating memzone for hardware rings. */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-                      uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-        char z_name[RTE_MEMZONE_NAMESIZE];
-        const struct rte_memzone *mz;
-
-        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                 dev->driver->pci_drv.driver.name,
-                 ring_name, dev->data->port_id, queue_id);
-
-        mz = rte_memzone_lookup(z_name);
-        if (mz)
-                return mz;
-
-        return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0,
-                                           NFP_MEMZONE_ALIGN);
-}
-
 /*
  * Atomically reads link status information from global structure rte_eth_dev.
  *
@@ -374,12 +354,12 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
                 if (new == 0)
                         break;
                 if (new & NFP_NET_CFG_UPDATE_ERR) {
-                        PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new);
+                        PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
                         return -1;
                 }
                 if (cnt >= NFP_NET_POLL_TIMEOUT) {
                         PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
-                                      " %dms\n", update, cnt);
+                                      " %dms", update, cnt);
                         rte_panic("Exiting\n");
                 }
                 nanosleep(&wait, 0); /* waiting for a 1ms */
@@ -423,7 +403,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
          * Reconfig errors imply situations where they can be handled.
          * Otherwise, rte_panic is called inside __nfp_net_reconfig
          */
-        PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n",
+        PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
                      ctrl, update);
         return -EIO;
 }
@@ -453,7 +433,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
          * called after that internal process
          */
 
-        PMD_INIT_LOG(DEBUG, "Configure\n");
+        PMD_INIT_LOG(DEBUG, "Configure");
 
         dev_conf = &dev->data->dev_conf;
         rxmode = &dev_conf->rxmode;
@@ -461,7 +441,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
         /* Checking TX mode */
         if (txmode->mq_mode) {
-                PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n");
+                PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
                 return -EINVAL;
         }
@@ -471,13 +451,13 @@
                         update = NFP_NET_CFG_UPDATE_RSS;
                         new_ctrl = NFP_NET_CFG_CTRL_RSS;
                 } else {
-                        PMD_INIT_LOG(INFO, "RSS not supported\n");
+                        PMD_INIT_LOG(INFO, "RSS not supported");
                         return -EINVAL;
                 }
         }
 
         if (rxmode->split_hdr_size) {
-                PMD_INIT_LOG(INFO, "rxmode does not support split header\n");
+                PMD_INIT_LOG(INFO, "rxmode does not support split header");
                 return -EINVAL;
         }
@@ -485,13 +465,13 @@
                 if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
                         new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
                 } else {
-                        PMD_INIT_LOG(INFO, "RXCSUM not supported\n");
+                        PMD_INIT_LOG(INFO, "RXCSUM not supported");
                         return -EINVAL;
                 }
         }
 
         if (rxmode->hw_vlan_filter) {
-                PMD_INIT_LOG(INFO, "VLAN filter not supported\n");
+                PMD_INIT_LOG(INFO, "VLAN filter not supported");
                 return -EINVAL;
         }
@@ -499,13 +479,13 @@
                 if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
                         new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
                 } else {
-                        PMD_INIT_LOG(INFO, "hw vlan strip not supported\n");
+                        PMD_INIT_LOG(INFO, "hw vlan strip not supported");
                         return -EINVAL;
                 }
         }
 
         if (rxmode->hw_vlan_extend) {
-                PMD_INIT_LOG(INFO, "VLAN extended not supported\n");
+                PMD_INIT_LOG(INFO, "VLAN extended not supported");
                 return -EINVAL;
         }
@@ -517,12 +497,12 @@
         /* this is handled in rte_eth_dev_configure */
         if (rxmode->hw_strip_crc) {
-                PMD_INIT_LOG(INFO, "strip CRC not supported\n");
+                PMD_INIT_LOG(INFO, "strip CRC not supported");
                 return -EINVAL;
         }
 
         if (rxmode->enable_scatter) {
-                PMD_INIT_LOG(INFO, "Scatter not supported\n");
+                PMD_INIT_LOG(INFO, "Scatter not supported");
                 return -EINVAL;
         }
@@ -638,7 +618,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                                     dev->data->nb_rx_queues * sizeof(int), 0);
         if (!intr_handle->intr_vec) {
                 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                             " intr_vec\n", dev->data->nb_rx_queues);
+                             " intr_vec", dev->data->nb_rx_queues);
                 return -ENOMEM;
         }
 }
@@ -646,11 +626,11 @@
         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
         if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
-                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO\n");
+                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
                 /* UIO just supports one queue and no LSC*/
                 nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
         } else {
-                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO\n");
+                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
                 for (i = 0; i < dev->data->nb_rx_queues; i++)
                         /*
                          * The first msix vector is reserved for non
@@ -676,7 +656,7 @@ nfp_net_start(struct rte_eth_dev *dev)
         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-        PMD_INIT_LOG(DEBUG, "Start\n");
+        PMD_INIT_LOG(DEBUG, "Start");
 
         /* Disabling queues just in case... */
         nfp_net_disable_queues(dev);
@@ -708,7 +688,8 @@ nfp_net_start(struct rte_eth_dev *dev)
                 return -1;
         }
 
-        nfp_configure_rx_interrupt(dev, intr_handle);
+        if (rte_intr_dp_is_en(intr_handle))
+                nfp_configure_rx_interrupt(dev, intr_handle);
 
         rte_intr_enable(intr_handle);
@@ -765,7 +746,7 @@ nfp_net_stop(struct rte_eth_dev *dev)
 {
         int i;
 
-        PMD_INIT_LOG(DEBUG, "Stop\n");
+        PMD_INIT_LOG(DEBUG, "Stop");
 
         nfp_net_disable_queues(dev);
@@ -788,7 +769,7 @@ nfp_net_close(struct rte_eth_dev *dev)
         struct nfp_net_hw *hw;
         struct rte_pci_device *pci_dev;
 
-        PMD_INIT_LOG(DEBUG, "Close\n");
+        PMD_INIT_LOG(DEBUG, "Close");
 
         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         pci_dev = RTE_DEV_TO_PCI(dev->device);
@@ -825,7 +806,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
         hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
         if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
-                PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n");
+                PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
                 return;
         }
@@ -1152,6 +1133,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G |
                                ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G |
                                ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G;
+
+        if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
 }
 
 static const uint32_t *
@@ -1181,11 +1165,6 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
         rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
 
-        if (rxq == NULL) {
-                PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx);
-                return 0;
-        }
-
         idx = rxq->rd_p;
 
         count = 0;
@@ -1305,8 +1284,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
 }
 
 static void
-nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
-                              void *param)
+nfp_net_dev_interrupt_handler(void *param)
 {
         int64_t timeout;
         struct rte_eth_link link;
@@ -1458,9 +1436,10 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
          * handle the maximum ring size is allocated in order to allow for
          * resizing in later calls to the queue setup function.
          */
-        tz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
+        tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
                                    sizeof(struct nfp_net_rx_desc) *
-                                   NFP_NET_MAX_RX_DESC, socket_id);
+                                   NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
+                                   socket_id);
 
         if (tz == NULL) {
                 RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
@@ -1600,9 +1579,10 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
          * handle the maximum ring size is allocated in order to allow for
          * resizing in later calls to the queue setup function.
          */
-        tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                                    sizeof(struct nfp_net_tx_desc) *
-                                   NFP_NET_MAX_TX_DESC, socket_id);
+                                   NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
+                                   socket_id);
         if (tz == NULL) {
                 RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
                 nfp_net_tx_queue_release(txq);
@@ -1653,6 +1633,33 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
         return 0;
 }
 
+/* nfp_net_tx_tso - Set TX descriptor for TSO */
+static inline void
+nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+               struct rte_mbuf *mb)
+{
+        uint64_t ol_flags;
+        struct nfp_net_hw *hw = txq->hw;
+
+        if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
+                goto clean_txd;
+
+        ol_flags = mb->ol_flags;
+
+        if (!(ol_flags & PKT_TX_TCP_SEG))
+                goto clean_txd;
+
+        txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
+        txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
+        txd->flags = PCIE_DESC_TX_LSO;
+        return;
+
+clean_txd:
+        txd->flags = 0;
+        txd->l4_offset = 0;
+        txd->lso = 0;
+}
+
 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
 static inline void
 nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
@@ -2021,7 +2028,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
         struct nfp_net_txq *txq;
         struct nfp_net_hw *hw;
-        struct nfp_net_tx_desc *txds;
+        struct nfp_net_tx_desc *txds, txd;
         struct rte_mbuf *pkt;
         uint64_t dma_addr;
         int pkt_size, dma_size;
@@ -2060,7 +2067,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 if (unlikely((pkt->nb_segs > 1) &&
                              !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
-                        PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n");
+                        PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
                         rte_panic("Multisegment packet unsupported\n");
                 }
@@ -2070,19 +2077,18 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 /*
                  * Checksum and VLAN flags just in the first descriptor for a
-                 * multisegment packet
+                 * multisegment packet, but TSO info needs to be in all of them.
                  */
-                nfp_net_tx_cksum(txq, txds, pkt);
+                txd.data_len = pkt->pkt_len;
+                nfp_net_tx_tso(txq, &txd, pkt);
+                nfp_net_tx_cksum(txq, &txd, pkt);
 
                 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
                     (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-                        txds->flags |= PCIE_DESC_TX_VLAN;
-                        txds->vlan = pkt->vlan_tci;
+                        txd.flags |= PCIE_DESC_TX_VLAN;
+                        txd.vlan = pkt->vlan_tci;
                 }
 
-                if (pkt->ol_flags & PKT_TX_TCP_SEG)
-                        rte_panic("TSO is not supported\n");
-
                 /*
                  * mbuf data_len is the data in one segment and pkt_len data
                  * in the whole packet. When the packet is just one segment,
@@ -2090,16 +2096,20 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                  */
                 pkt_size = pkt->pkt_len;
 
-                /* Releasing mbuf which was prefetched above */
-                if (*lmbuf)
-                        rte_pktmbuf_free(*lmbuf);
-
-                /*
-                 * Linking mbuf with descriptor for being released
-                 * next time descriptor is used
-                 */
-                *lmbuf = pkt;
-
                 while (pkt_size) {
+                        /* Copying TSO, VLAN and cksum info */
+                        *txds = txd;
+
+                        /* Releasing mbuf used by this descriptor previously*/
+                        if (*lmbuf)
+                                rte_pktmbuf_free_seg(*lmbuf);
+
+                        /*
+                         * Linking mbuf with descriptor for being released
+                         * next time descriptor is used
+                         */
+                        *lmbuf = pkt;
+
                         dma_size = pkt->data_len;
                         dma_addr = rte_mbuf_data_dma_addr(pkt);
                         PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -2107,7 +2117,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         /* Filling descriptors fields */
                         txds->dma_len = dma_size;
-                        txds->data_len = pkt->pkt_len;
+                        txds->data_len = txd.data_len;
                         txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
                         txds->dma_addr_lo = (dma_addr & 0xffffffff);
                         ASSERT(free_descs > 0);
@@ -2127,6 +2137,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         }
                         /* Referencing next free TX descriptor */
                         txds = &txq->txds[txq->wr_p];
+                        lmbuf = &txq->txbufs[txq->wr_p].mbuf;
                         issued_descs++;
                 }
                 i++;
@@ -2150,9 +2161,9 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
         new_ctrl = 0;
 
         if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
-            (mask & ETH_VLAN_FILTER_OFFLOAD))
-                RTE_LOG(INFO, PMD, "Not support for ETH_VLAN_FILTER_OFFLOAD or"
-                        " ETH_VLAN_FILTER_EXTEND");
+            (mask & ETH_VLAN_EXTEND_OFFLOAD))
+                RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
+                        " ETH_VLAN_EXTEND_OFFLOAD");
 
         /* Enable vlan strip if it is not configured yet */
         if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
@@ -2441,14 +2452,14 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
         pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
         rte_eth_copy_pci_info(eth_dev, pci_dev);
-        eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+        eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
         hw->device_id = pci_dev->id.device_id;
         hw->vendor_id = pci_dev->id.vendor_id;
         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
 
-        PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n",
+        PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
                      pci_dev->id.vendor_id, pci_dev->id.device_id,
                      pci_dev->addr.domain, pci_dev->addr.bus,
                      pci_dev->addr.devid, pci_dev->addr.function);
@@ -2475,13 +2486,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                 return -ENODEV;
         }
 
-        PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off);
-        PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off);
+        PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x", tx_bar_off);
+        PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x", rx_bar_off);
 
         hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
         hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
 
-        PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n",
+        PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
                      hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
 
         nfp_net_cfg_queue_setup(hw);
@@ -2497,9 +2508,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
         else
                 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
 
-        PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n",
+        PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
                      hw->ver, hw->max_mtu);
-        PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap,
+        PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
                      hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
                      hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
                      hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
@@ -2515,7 +2526,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
         hw->stride_rx = stride;
         hw->stride_tx = stride;
 
-        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
+        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
                      hw->max_rx_queues, hw->max_tx_queues);
 
         /* Initializing spinlock for reconfigs */
@@ -2559,7 +2570,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
         return 0;
 }
 
-static struct rte_pci_id pci_id_nfp_net_map[] = {
+static const struct rte_pci_id pci_id_nfp_net_map[] = {
         {
                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
                                PCI_DEVICE_ID_NFP6000_PF_NIC)
@@ -2573,18 +2584,26 @@ static struct rte_pci_id pci_id_nfp_net_map[] = {
         },
 };
 
-static struct eth_driver rte_nfp_net_pmd = {
-        .pci_drv = {
-                .id_table = pci_id_nfp_net_map,
-                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-                .probe = rte_eth_dev_pci_probe,
-                .remove = rte_eth_dev_pci_remove,
-        },
-        .eth_dev_init = nfp_net_init,
-        .dev_private_size = sizeof(struct nfp_net_adapter),
+static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+        struct rte_pci_device *pci_dev)
+{
+        return rte_eth_dev_pci_generic_probe(pci_dev,
+                sizeof(struct nfp_net_adapter), nfp_net_init);
+}
+
+static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
+{
+        return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nfp_net_pmd = {
+        .id_table = pci_id_nfp_net_map,
+        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+        .probe = eth_nfp_pci_probe,
+        .remove = eth_nfp_pci_remove,
 };
 
-RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");