X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=e315dd8d158c6910e59e9ca0eb323205c5f6ae87;hb=0880c40113ef2d69b6433d7dfa0b4032cc378b0d;hp=7c82e96fdce323f63619962991dec7cec9592f1f;hpb=cef1f80504ea2052985fe6485c978e59245252df;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 7c82e96fdc..e315dd8d15 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -39,18 +39,7 @@
  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
  */
 
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
-#include
 
 #include
 #include
@@ -65,6 +54,7 @@
 #include
 #include
 #include
+#include
 
 #include "nfp_net_pmd.h"
 #include "nfp_net_logs.h"
@@ -73,8 +63,16 @@
 /* Prototypes */
 static void nfp_net_close(struct rte_eth_dev *dev);
 static int nfp_net_configure(struct rte_eth_dev *dev);
+static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle,
+					  void *param);
+static void nfp_net_dev_interrupt_delayed_handler(void *param);
+static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void nfp_net_infos_get(struct rte_eth_dev *dev,
+			      struct rte_eth_dev_info *dev_info);
 static int nfp_net_init(struct rte_eth_dev *eth_dev);
 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
+static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
 static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
 static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
 				       uint16_t queue_idx);
@@ -216,7 +214,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 	const struct rte_memzone *mz;
 
 	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		 dev->driver->pci_drv.name,
+		 dev->driver->pci_drv.driver.name,
 		 ring_name, dev->data->port_id, queue_id);
 
 	mz = rte_memzone_lookup(z_name);
@@ -325,7 +323,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 
 	for (i = 0; i < txq->tx_count; i++) {
 		if (txq->txbufs[i].mbuf) {
-			rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
+			rte_pktmbuf_free(txq->txbufs[i].mbuf);
 			txq->txbufs[i].mbuf = NULL;
 		}
 	}
@@ -350,6 +348,7 @@ nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
 	txq->wr_p = 0;
 	txq->rd_p = 0;
 	txq->tail = 0;
+	txq->qcp_rd_p = 0;
 }
 
 static int
@@ -409,6 +408,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
 	PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
 		    ctrl, update);
 
+	rte_spinlock_lock(&hw->reconfig_lock);
+
 	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
 
@@ -416,6 +417,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
 
 	err = __nfp_net_reconfig(hw, update);
 
+	rte_spinlock_unlock(&hw->reconfig_lock);
+
 	if (!err)
 		return 0;
 
@@ -604,18 +607,8 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
 static void
 nfp_net_params_setup(struct nfp_net_hw *hw)
 {
-	uint32_t *mac_address;
-
 	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
 	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
-
-	/* A MAC address is 8 bytes long */
-	mac_address = (uint32_t *)(hw->mac_addr);
-
-	nn_cfg_writel(hw, NFP_NET_CFG_MACADDR,
-		      rte_cpu_to_be_32(*mac_address));
-	nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4,
-		      rte_cpu_to_be_32(*(mac_address + 4)));
 }
 
 static void
@@ -624,6 +617,17 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
 {
 	hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 }
 
+static void nfp_net_read_mac(struct nfp_net_hw *hw)
+{
+	uint32_t tmp;
+
+	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
+	memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
+
+	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
+	memcpy(&hw->mac_addr[4], &tmp, 2);
+}
+
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
@@ -726,14 +730,79 @@ nfp_net_close(struct rte_eth_dev *dev)
 
 	nfp_net_stop(dev);
 
+	rte_intr_disable(&dev->pci_dev->intr_handle);
 	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
 
+	/* unregister callback func from eal lib */
+	rte_intr_callback_unregister(&dev->pci_dev->intr_handle,
+				     nfp_net_dev_interrupt_handler,
+				     (void *)dev);
+
 	/*
 	 * The ixgbe PMD driver disables the pcie master on the
 	 * device. The i40e does not...
 	 */
 }
 
+static void
+nfp_net_promisc_enable(struct rte_eth_dev *dev)
+{
+	uint32_t new_ctrl, update = 0;
+	struct nfp_net_hw *hw;
+
+	PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
+		PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n");
+		return;
+	}
+
+	if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
+		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
+		return;
+	}
+
+	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
+	update = NFP_NET_CFG_UPDATE_GEN;
+
+	/*
+	 * DPDK sets promiscuous mode on just after this call assuming
+	 * it can not fail ...
+	 */
+	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+		return;
+
+	hw->ctrl = new_ctrl;
+}
+
+static void
+nfp_net_promisc_disable(struct rte_eth_dev *dev)
+{
+	uint32_t new_ctrl, update = 0;
+	struct nfp_net_hw *hw;
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
+		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
+		return;
+	}
+
+	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
+	update = NFP_NET_CFG_UPDATE_GEN;
+
+	/*
+	 * DPDK sets promiscuous mode off just before this call
+	 * assuming it can not fail ...
+	 */
+	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+		return;
+
+	hw->ctrl = new_ctrl;
+}
+
 /*
  * return 0 means link status changed, -1 means not changed
  *
@@ -759,11 +828,11 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	if (nn_link_status & NFP_NET_CFG_STS_LINK)
-		link.link_status = 1;
+		link.link_status = ETH_LINK_UP;
 
 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
 	/* Other cards can limit the tx and rx rate per VF */
-	link.link_speed = ETH_LINK_SPEED_40G;
+	link.link_speed = ETH_SPEED_NUM_40G;
 
 	if (old.link_status != link.link_status) {
 		nfp_net_dev_atomic_write_link_status(dev, &link);
@@ -844,11 +913,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
 
-	nfp_dev_stats.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
-	nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
 	/* reading general device stats */
 	nfp_dev_stats.ierrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -860,12 +924,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
 
-	/* Multicast frames received */
-	nfp_dev_stats.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
-	nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
 	/* RX ring mbuf allocation failures */
 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 
@@ -927,9 +985,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 	hw->eth_stats_base.obytes =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
 
-	hw->eth_stats_base.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
 	/* reading general device stats */
 	hw->eth_stats_base.ierrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -937,10 +992,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 	hw->eth_stats_base.oerrors =
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
 
-	/* Multicast frames received */
-	hw->eth_stats_base.imcasts =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
 	/* RX ring mbuf allocation failures */
 	dev->data->rx_mbuf_alloc_failed = 0;
 
@@ -948,6 +999,82 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
 }
 
+static void
+nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+	struct nfp_net_hw *hw;
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	dev_info->driver_name = dev->driver->pci_drv.driver.name;
+	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
+	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
+	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+	dev_info->max_rx_pktlen = hw->mtu;
+	/* Next should change when PF support is implemented */
+	dev_info->max_mac_addrs = 1;
+
+	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
+					     DEV_RX_OFFLOAD_UDP_CKSUM |
+					     DEV_RX_OFFLOAD_TCP_CKSUM;
+
+	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
+		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+
+	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
+					     DEV_RX_OFFLOAD_UDP_CKSUM |
+					     DEV_RX_OFFLOAD_TCP_CKSUM;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = DEFAULT_RX_PTHRESH,
+			.hthresh = DEFAULT_RX_HTHRESH,
+			.wthresh = DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+ }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = DEFAULT_TX_PTHRESH, + .hthresh = DEFAULT_TX_HTHRESH, + .wthresh = DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + + dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ; + dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ; + + dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G; +} + +static const uint32_t * +nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to nfp_net_set_hash() */ + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_MASK, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == nfp_net_recv_pkts) + return ptypes; + return NULL; +} + static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx) { @@ -992,6 +1119,142 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx) return count; } +static void +nfp_net_dev_link_status_print(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + + memset(&link, 0, sizeof(link)); + nfp_net_dev_atomic_read_link_status(dev, &link); + if (link.link_status) + RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n", + (int)(dev->data->port_id), (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX + ? "full-duplex" : "half-duplex"); + else + RTE_LOG(INFO, PMD, " Port %d: Link Down\n", + (int)(dev->data->port_id)); + + RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n", + dev->pci_dev->addr.domain, dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, dev->pci_dev->addr.function); +} + +/* Interrupt configuration and handling */ + +/* + * nfp_net_irq_unmask - Unmask an interrupt + * + * If MSI-X auto-masking is enabled clear the mask bit, otherwise + * clear the ICR for the entry. + */ +static void +nfp_net_irq_unmask(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { + /* If MSI-X auto-masking is used, clear the entry */ + rte_wmb(); + rte_intr_enable(&dev->pci_dev->intr_handle); + } else { + /* Make sure all updates are written before un-masking */ + rte_wmb(); + nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX), + NFP_NET_CFG_ICR_UNMASKED); + } +} + +static void +nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + int64_t timeout; + struct rte_eth_link link; + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n"); + + /* get the link status */ + memset(&link, 0, sizeof(link)); + nfp_net_dev_atomic_read_link_status(dev, &link); + + nfp_net_link_update(dev, 0); + + /* likely to up */ + if (!link.link_status) { + /* handle it 1 sec later, wait it being stable */ + timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT; + /* likely to down */ + } else { + /* handle it 4 sec later, wait it being stable */ + timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT; + } + + if (rte_eal_alarm_set(timeout * 1000, + nfp_net_dev_interrupt_delayed_handler, + (void *)dev) < 0) { + RTE_LOG(ERR, PMD, "Error setting alarm"); + /* Unmasking */ + nfp_net_irq_unmask(dev); + } +} + +/* + * Interrupt handler which shall be registered for alarm callback for delayed + * handling specific interrupt to wait for the stable nic state. 
As the NIC + * interrupt state is not stable for nfp after link is just down, it needs + * to wait 4 seconds to get the stable status. + * + * @param handle Pointer to interrupt handle. + * @param param The address of parameter (struct rte_eth_dev *) + * + * @return void + */ +static void +nfp_net_dev_interrupt_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + nfp_net_link_update(dev, 0); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + + nfp_net_dev_link_status_print(dev); + + /* Unmasking */ + nfp_net_irq_unmask(dev); +} + +static int +nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* check that mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu)) + return -EINVAL; + + /* switch to jumbo mode if needed */ + if ((uint32_t)mtu > ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.jumbo_frame = 1; + else + dev->data->dev_conf.rxmode.jumbo_frame = 0; + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu; + + /* writing to configuration space */ + nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu); + + hw->mtu = mtu; + + return 0; +} + static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -1012,7 +1275,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, (nb_desc > NFP_NET_MAX_RX_DESC) || (nb_desc < NFP_NET_MIN_RX_DESC)) { RTE_LOG(ERR, PMD, "Wrong nb_desc value\n"); - return (-EINVAL); + return -EINVAL; } /* @@ -1028,7 +1291,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq), RTE_CACHE_LINE_SIZE, socket_id); if (rxq == NULL) - return (-ENOMEM); + return -ENOMEM; /* Hw queues mapping based on firmware confifguration */ rxq->qidx = queue_idx; @@ -1065,7 +1328,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, if (tz == NULL) { RTE_LOG(ERR, PMD, "Error allocatig rx dma\n"); nfp_net_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } /* Saving physical and virtual addresses for the RX ring */ @@ -1078,7 +1341,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE, socket_id); if (rxq->rxbufs == NULL) { nfp_net_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", @@ -1116,7 +1379,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) if (mbuf == NULL) { RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n", (unsigned)rxq->qidx); - return (-ENOMEM); + return -ENOMEM; } dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf)); @@ -1194,7 +1457,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, RTE_CACHE_LINE_SIZE, socket_id); if (txq == NULL) { RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); - return (-ENOMEM); + return -ENOMEM; } /* @@ -1208,7 +1471,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, if (tz == NULL) { RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); nfp_net_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } txq->tx_count = nb_desc; @@ -1236,7 +1499,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, RTE_CACHE_LINE_SIZE, socket_id); if (txq->txbufs == NULL) { nfp_net_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", txq->txbufs, txq->txds, (unsigned long 
int)txq->dma); @@ -1261,7 +1524,7 @@ static inline void nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, struct rte_mbuf *mb) { - uint16_t ol_flags; + uint64_t ol_flags; struct nfp_net_hw *hw = txq->hw; if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) @@ -1282,7 +1545,8 @@ nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, break; } - txd->flags |= PCIE_DESC_TX_CSUM; + if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)) + txd->flags |= PCIE_DESC_TX_CSUM; } /* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */ @@ -1443,7 +1707,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) * DPDK just checks the queue is lower than max queues * enabled. But the queue needs to be configured */ - RTE_LOG(ERR, PMD, "RX Bad queue\n"); + RTE_LOG_DP(ERR, PMD, "RX Bad queue\n"); return -EINVAL; } @@ -1456,7 +1720,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxb = &rxq->rxbufs[idx]; if (unlikely(rxb == NULL)) { - RTE_LOG(ERR, PMD, "rxb does not exist!\n"); + RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n"); break; } @@ -1476,7 +1740,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) */ new_mb = rte_pktmbuf_alloc(rxq->mem_pool); if (unlikely(new_mb == NULL)) { - RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u " + RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u " "queue_id=%u\n", (unsigned)rxq->port_id, (unsigned)rxq->qidx); nfp_net_mbuf_alloc_failed(rxq); @@ -1507,7 +1771,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) * responsibility of avoiding it. But we have * to give some info about the error */ - RTE_LOG(ERR, PMD, + RTE_LOG_DP(ERR, PMD, "mbuf overflow likely due to the RX offset.\n" "\t\tYour mbuf size should have extra space for" " RX offset=%u bytes.\n" @@ -1542,7 +1806,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) && (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) { mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan); - mb->ol_flags |= PKT_RX_VLAN_PKT; + mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; } /* Adding the mbuff to the mbuff array passed by the app */ @@ -1718,13 +1982,18 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) */ pkt_size = pkt->pkt_len; - while (pkt_size) { - /* Releasing mbuf which was prefetched above */ - if (*lmbuf) - rte_pktmbuf_free_seg(*lmbuf); + /* Releasing mbuf which was prefetched above */ + if (*lmbuf) + rte_pktmbuf_free(*lmbuf); + /* + * Linking mbuf with descriptor for being released + * next time descriptor is used + */ + *lmbuf = pkt; + while (pkt_size) { dma_size = pkt->data_len; - dma_addr = RTE_MBUF_DATA_DMA_ADDR(pkt); + dma_addr = rte_mbuf_data_dma_addr(pkt); PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:" "%" PRIx64 "\n", dma_addr); @@ -1736,12 +2005,6 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) ASSERT(free_descs > 0); free_descs--; - /* - * Linking mbuf with descriptor for being released - * next time descriptor is used - */ - *lmbuf = pkt; - txq->wr_p++; txq->tail++; if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/ @@ -1770,6 +2033,41 @@ xmit_end: return i; } +static void +nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + uint32_t new_ctrl, update; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + new_ctrl = 0; + + if ((mask & 
ETH_VLAN_FILTER_OFFLOAD) || + (mask & ETH_VLAN_FILTER_OFFLOAD)) + RTE_LOG(INFO, PMD, "Not support for ETH_VLAN_FILTER_OFFLOAD or" + " ETH_VLAN_FILTER_EXTEND"); + + /* Enable vlan strip if it is not configured yet */ + if ((mask & ETH_VLAN_STRIP_OFFLOAD) && + !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) + new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN; + + /* Disable vlan strip just if it is configured */ + if (!(mask & ETH_VLAN_STRIP_OFFLOAD) && + (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) + new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN; + + if (new_ctrl == 0) + return; + + update = NFP_NET_CFG_UPDATE_GEN; + + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return; + + hw->ctrl = new_ctrl; +} + /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */ static int nfp_net_reta_update(struct rte_eth_dev *dev, @@ -1985,14 +2283,20 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, } /* Initialise and register driver with DPDK Application */ -static struct eth_dev_ops nfp_net_eth_dev_ops = { +static const struct eth_dev_ops nfp_net_eth_dev_ops = { .dev_configure = nfp_net_configure, .dev_start = nfp_net_start, .dev_stop = nfp_net_stop, .dev_close = nfp_net_close, + .promiscuous_enable = nfp_net_promisc_enable, + .promiscuous_disable = nfp_net_promisc_disable, .link_update = nfp_net_link_update, .stats_get = nfp_net_stats_get, .stats_reset = nfp_net_stats_reset, + .dev_infos_get = nfp_net_infos_get, + .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, + .mtu_set = nfp_net_dev_mtu_set, + .vlan_offload_set = nfp_net_vlan_offload_set, .reta_update = nfp_net_reta_update, .reta_query = nfp_net_reta_query, .rss_hash_update = nfp_net_rss_hash_update, @@ -2027,6 +2331,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) return 0; pci_dev = eth_dev->pci_dev; + rte_eth_copy_pci_info(eth_dev, pci_dev); + hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; hw->subsystem_device_id = pci_dev->id.subsystem_device_id; @@ -2103,6 +2409,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n", hw->max_rx_queues, hw->max_tx_queues); + /* Initializing spinlock for reconfigs */ + rte_spinlock_init(&hw->reconfig_lock); + /* Allocating memory for mac addr */ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { @@ -2110,12 +2419,15 @@ nfp_net_init(struct rte_eth_dev *eth_dev) return -ENOMEM; } - /* Using random mac addresses for VFs */ - eth_random_addr(&hw->mac_addr[0]); + nfp_net_read_mac(hw); + + if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) + /* Using random mac addresses for VFs */ + eth_random_addr(&hw->mac_addr[0]); /* Copying mac address to DPDK eth_dev struct */ - ether_addr_copy(ð_dev->data->mac_addrs[0], - (struct ether_addr *)hw->mac_addr); + ether_addr_copy((struct ether_addr *)hw->mac_addr, + ð_dev->data->mac_addrs[0]); PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " "mac=%02x:%02x:%02x:%02x:%02x:%02x", @@ -2124,6 +2436,17 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2], hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]); + /* Registering LSC interrupt handler */ + rte_intr_callback_register(&pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, + (void *)eth_dev); + + /* enable uio intr after callback register */ + rte_intr_enable(&pci_dev->intr_handle); + + /* Telling the firmware about the LSC interrupt entry */ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + 
 	/* Recording current stats counters values */
 	nfp_net_stats_reset(eth_dev);
 
@@ -2132,16 +2455,12 @@
 
 static struct rte_pci_id pci_id_nfp_net_map[] = {
 	{
-		.vendor_id = PCI_VENDOR_ID_NETRONOME,
-		.device_id = PCI_DEVICE_ID_NFP6000_PF_NIC,
-		.subsystem_vendor_id = PCI_ANY_ID,
-		.subsystem_device_id = PCI_ANY_ID,
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+			       PCI_DEVICE_ID_NFP6000_PF_NIC)
 	},
 	{
-		.vendor_id = PCI_VENDOR_ID_NETRONOME,
-		.device_id = PCI_DEVICE_ID_NFP6000_VF_NIC,
-		.subsystem_vendor_id = PCI_ANY_ID,
-		.subsystem_device_id = PCI_ANY_ID,
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+			       PCI_DEVICE_ID_NFP6000_VF_NIC)
 	},
 	{
 		.vendor_id = 0,
@@ -2149,33 +2468,20 @@ static struct rte_pci_id pci_id_nfp_net_map[] = {
 };
 
 static struct eth_driver rte_nfp_net_pmd = {
-	{
-		.name = "rte_nfp_net_pmd",
+	.pci_drv = {
 		.id_table = pci_id_nfp_net_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+			     RTE_PCI_DRV_DETACHABLE,
+		.probe = rte_eth_dev_pci_probe,
+		.remove = rte_eth_dev_pci_remove,
 	},
 	.eth_dev_init = nfp_net_init,
 	.dev_private_size = sizeof(struct nfp_net_adapter),
 };
 
-static int
-nfp_net_pmd_init(const char *name __rte_unused,
-		 const char *params __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-	PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n",
-		     NFP_NET_PMD_VERSION);
-
-	rte_eth_driver_register(&rte_nfp_net_pmd);
-	return 0;
-}
-
-static struct rte_driver rte_nfp_net_driver = {
-	.type = PMD_PDEV,
-	.init = nfp_net_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_nfp_net_driver);
+RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");
 
 /*
  * Local variables: