+ nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
+
+ nfp_dev_stats.ibytes =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+
+ nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
+
+ nfp_dev_stats.opackets =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+
+ nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
+
+ nfp_dev_stats.obytes =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+
+ nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
+
+ /* reading general device stats */
+ nfp_dev_stats.ierrors =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+
+ nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
+
+ nfp_dev_stats.oerrors =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+
+ nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
+
+ /* RX ring mbuf allocation failures */
+ nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+
+ nfp_dev_stats.imissed =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+
+ nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
+
+ if (stats)
+ memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+}
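+
+/*
+ * A worked example of the base/delta scheme above (illustrative
+ * numbers): if the HW counter behind NFP_NET_CFG_STATS_RX_FRAMES
+ * reads 1000 and eth_stats_base.ipackets is 400 (captured at the
+ * last stats reset), the reported ipackets is 600.
+ */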
+
+static void
+nfp_net_stats_reset(struct rte_eth_dev *dev)
+{
+ int i;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * hw->eth_stats_base records the per-counter starting point, so
+ * that nfp_net_stats_get() can report deltas. Let's update it now.
+ */
+
+ /* reading per RX ring stats */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ hw->eth_stats_base.q_ipackets[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+
+ hw->eth_stats_base.q_ibytes[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+ }
+
+ /* reading per TX ring stats */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ hw->eth_stats_base.q_opackets[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+
+ hw->eth_stats_base.q_obytes[i] =
+ nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+ }
+
+ hw->eth_stats_base.ipackets =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+
+ hw->eth_stats_base.ibytes =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+
+ hw->eth_stats_base.opackets =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+
+ hw->eth_stats_base.obytes =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+
+ /* reading general device stats */
+ hw->eth_stats_base.ierrors =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+
+ hw->eth_stats_base.oerrors =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+
+ /* RX ring mbuf allocation failures */
+ dev->data->rx_mbuf_alloc_failed = 0;
+
+ hw->eth_stats_base.imissed =
+ nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+}
+
+static void
+nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
+ dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+ dev_info->max_rx_pktlen = hw->mtu;
+ /* Next should change when PF support is implemented */
+ dev_info->max_mac_addrs = 1;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = DEFAULT_RX_PTHRESH,
+ .hthresh = DEFAULT_RX_HTHRESH,
+ .wthresh = DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = DEFAULT_TX_PTHRESH,
+ .hthresh = DEFAULT_TX_HTHRESH,
+ .wthresh = DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+
+ dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
+ dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+}
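+
+/*
+ * The limits and offload flags above are consumed by applications
+ * through the generic ethdev API. A minimal sketch (illustrative
+ * only, not part of this driver):
+ *
+ *    struct rte_eth_dev_info info;
+ *
+ *    rte_eth_dev_info_get(port_id, &info);
+ *    if (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
+ *        printf("TSO supported\n");
+ */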
+
+static const uint32_t *
+nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to nfp_net_set_hash() */
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_MASK,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == nfp_net_recv_pkts)
+ return ptypes;
+ return NULL;
+}
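+
+/*
+ * Applications retrieve this list through the generic ethdev API.
+ * A minimal sketch (illustrative only, not part of this driver):
+ *
+ *    uint32_t ptypes[8];
+ *    int n;
+ *
+ *    n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
+ *                                         ptypes, 8);
+ */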
+
+static uint32_t
+nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_rx_desc *rxds;
+ uint32_t idx;
+ uint32_t count;
+
+ rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
+
+ idx = rxq->rd_p;
+
+ count = 0;
+
+ /*
+ * Other PMDs only check the DD bit on every 4th descriptor and,
+ * when the first one of a chunk is done, count all four. This is
+ * not accurate but is good for performance. Ideally it should be
+ * done on descriptor chunks belonging to the same cache line (see
+ * the illustrative sketch after this function).
+ */
+
+ while (count < rxq->rx_count) {
+ rxds = &rxq->rxds[idx];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
+ count++;
+ idx++;
+
+ /* Wrapping? */
+ if (idx == rxq->rx_count)
+ idx = 0;
+ }
+
+ return count;
+}
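+
+/*
+ * Sketch of the chunked DD check mentioned in the comment above. It is
+ * not used by this driver; it assumes the nfp_net_rx_queue_count()
+ * layout, a ring size that is a multiple of 4 and a chunk-aligned
+ * read pointer:
+ *
+ *    while (count < rxq->rx_count) {
+ *        if ((rxq->rxds[idx].rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ *            break;
+ *        count += 4;
+ *        idx = (idx + 4) % rxq->rx_count;
+ *    }
+ */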
+
+static int
+nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw;
+ int base = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
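+ /*
+ * With UIO a single interrupt vector is used, so queue vectors
+ * start at ICR entry 0. Otherwise (MSI-X) entry 0 appears to be
+ * reserved for LSC (see nfp_net_irq_unmask), so queue vectors
+ * start at entry 1.
+ */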
+ if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+ base = 1;
+
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
+ NFP_NET_CFG_ICR_UNMASKED);
+ return 0;
+}
+
+static int
+nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw;
+ int base = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+ base = 1;
+
+ /* Make sure all updates are written before masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
+ NFP_NET_CFG_ICR_RXTX);
+ return 0;
+}
+
+static void
+nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_eth_link link;
+
+ memset(&link, 0, sizeof(link));
+ nfp_net_dev_atomic_read_link_status(dev, &link);
+ if (link.link_status)
+ RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
+ (int)(dev->data->port_id), (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX
+ ? "full-duplex" : "half-duplex");
+ else
+ RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
+ (int)(dev->data->port_id));
+
+ RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+}
+
+/* Interrupt configuration and handling */
+
+/*
+ * nfp_net_irq_unmask - Unmask an interrupt
+ *
+ * If MSI-X auto-masking is enabled clear the mask bit, otherwise
+ * clear the ICR for the entry.
+ */
+static void
+nfp_net_irq_unmask(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ struct rte_pci_device *pci_dev;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
+ /* If MSI-X auto-masking is used, clear the entry */
+ rte_wmb();
+ rte_intr_enable(&pci_dev->intr_handle);
+ } else {
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
+ NFP_NET_CFG_ICR_UNMASKED);
+ }
+}
+
+static void
+nfp_net_dev_interrupt_handler(void *param)
+{
+ int64_t timeout;
+ struct rte_eth_link link;
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ PMD_DRV_LOG(DEBUG, "LSC interrupt received\n");
+
+ /* get the link status */
+ memset(&link, 0, sizeof(link));
+ nfp_net_dev_atomic_read_link_status(dev, &link);
+
+ nfp_net_link_update(dev, 0);
+
+ if (!link.link_status) {
+ /* link was down: it is likely coming up, check again in 1 sec */
+ timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
+ } else {
+ /* link was up: it is likely going down, check again in 4 sec */
+ timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
+ }
+
+ if (rte_eal_alarm_set(timeout * 1000,
+ nfp_net_dev_interrupt_delayed_handler,
+ (void *)dev) < 0) {
+ RTE_LOG(ERR, PMD, "Error setting alarm\n");
+ /* Unmasking */
+ nfp_net_irq_unmask(dev);
+ }
+}
+
+/*
+ * Interrupt handler registered as an alarm callback for handling the
+ * interrupt with a delay, waiting for the NIC state to become stable.
+ * The nfp interrupt state is not stable right after the link goes
+ * down, so it needs to wait 4 seconds to get a stable status.
+ *
+ * @param param The address of parameter (struct rte_eth_dev *)
+ *
+ * @return void
+ */
+static void
+nfp_net_dev_interrupt_delayed_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ nfp_net_link_update(dev, 0);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+
+ nfp_net_dev_link_status_print(dev);
+
+ /* Unmasking */
+ nfp_net_irq_unmask(dev);
+}
+
+static int
+nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
+ return -EINVAL;
+
+ /* switch to jumbo mode if needed */
+ if ((uint32_t)mtu > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
+
+ /* writing to configuration space */
+ nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
+
+ hw->mtu = mtu;
+
+ return 0;
+}
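+
+/*
+ * This handler is reached through the generic ethdev API. A minimal
+ * sketch (illustrative only, assuming hw->max_mtu allows the value):
+ *
+ *    if (rte_eth_dev_set_mtu(port_id, 9000) == 0)
+ *        printf("MTU updated, jumbo mode enabled\n");
+ */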
+
+static int
+nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *tz;
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Validating number of descriptors: the size of the descriptor
+ * ring in bytes must be a multiple of 128 and within the
+ * supported min/max range.
+ */
+ if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
+ (nb_desc > NFP_NET_MAX_RX_DESC) ||
+ (nb_desc < NFP_NET_MIN_RX_DESC)) {
+ RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Free memory prior to re-allocation if needed. This is the case after
+ * calling nfp_net_stop
+ */
+ if (dev->data->rx_queues[queue_idx]) {
+ nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocating rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+
+ /* Hw queues mapping based on firmware configuration */
+ rxq->qidx = queue_idx;
+ rxq->fl_qcidx = queue_idx * hw->stride_rx;
+ rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
+ rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
+ rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
+
+ /*
+ * Tracking mbuf size for detecting a potential mbuf overflow due to
+ * RX offset
+ */
+ rxq->mem_pool = mp;
+ rxq->mbuf_size = rxq->mem_pool->elt_size;
+ rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
+ hw->flbufsz = rxq->mbuf_size;
+
+ rxq->rx_count = nb_desc;
+ rxq->port_id = dev->data->port_id;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
+ : ETHER_CRC_LEN);
+ rxq->drop_en = rx_conf->rx_drop_en;
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ sizeof(struct nfp_net_rx_desc) *
+ NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
+ socket_id);
+
+ if (tz == NULL) {
+ RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
+ nfp_net_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /* Saving physical and virtual addresses for the RX ring */
+ rxq->dma = (uint64_t)tz->phys_addr;
+ rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
+
+ /* mbuf pointers array for referencing mbufs linked to RX descriptors */
+ rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
+ sizeof(*rxq->rxbufs) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->rxbufs == NULL) {
+ nfp_net_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
+ rxq->rxbufs, rxq->rxds, rxq->dma);
+
+ nfp_net_reset_rx_queue(rxq);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ rxq->hw = hw;
+
+ /*
+ * Telling the HW about the physical address of the RX ring and the
+ * number of descriptors in log2 format (e.g. 1024 descriptors are
+ * written as 10)
+ */
+ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+
+ return 0;
+}
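+
+/*
+ * Application-side call reaching this handler. A minimal sketch
+ * (illustrative only, assuming an already created mbuf_pool):
+ *
+ *    ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
+ *                                 NULL, mbuf_pool);
+ */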
+
+static int
+nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
+{
+ struct nfp_net_rx_buff *rxe = rxq->rxbufs;
+ uint64_t dma_addr;
+ unsigned i;
+
+ PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",