net/nfp: fix stats struct initial value
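
nfp_dev_stats is a stack variable in nfp_net_stats_get(), so any counter the
function never writes, for example per-queue slots past the configured queue
count, was returned holding stale stack contents. Zero the struct before
filling it, and switch the callback to the int-returning stats_get signature
so a NULL stats pointer surfaces as -EINVAL at rte_eth_stats_get() callers.

A minimal sketch of the same defensive pattern, seen from the application
side; query_stats() and its error handling are illustrative, not part of
this patch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>

    static int
    query_stats(uint16_t port_id)
    {
            struct rte_eth_stats stats;

            /* Same pattern as the fix: zero the struct up front so any
             * counter the provider never writes reads as 0, not junk. */
            memset(&stats, 0, sizeof(stats));
            if (rte_eth_stats_get(port_id, &stats) != 0)
                    return -1;

            printf("ipackets=%" PRIu64 " imissed=%" PRIu64 "\n",
                   stats.ipackets, stats.imissed);
            return 0;
    }
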
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 000d339..0501156 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -88,7 +88,7 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  uint16_t nb_desc, unsigned int socket_id,
                                  const struct rte_eth_txconf *tx_conf);
 static int nfp_net_start(struct rte_eth_dev *dev);
-static void nfp_net_stats_get(struct rte_eth_dev *dev,
+static int nfp_net_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats);
 static void nfp_net_stats_reset(struct rte_eth_dev *dev);
 static void nfp_net_stop(struct rte_eth_dev *dev);
@@ -106,7 +106,7 @@ static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #define NFP_QCP_MAX_ADD        0x7f
 
 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
-       (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+       (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
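+/* Same value the rte_mbuf_data_iova_default() helper computes */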
 
 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
 enum nfp_qcp_ptr {
@@ -1027,7 +1027,7 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
        return -1;
 }
 
-static void
+static int
 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        int i;
@@ -1038,6 +1038,8 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
 
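+       /* Zero first so counters not written below (e.g. per-queue slots
+        * beyond RTE_ETHDEV_QUEUE_STAT_CNTRS) don't report stack garbage. */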
+       memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
        /* reading per RX ring stats */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
@@ -1113,8 +1115,11 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
 
-       if (stats)
+       if (stats) {
                memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+               return 0;
+       }
+       return -EINVAL;
 }
 
 static void
@@ -1353,12 +1358,12 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
        nfp_net_dev_atomic_read_link_status(dev, &link);
        if (link.link_status)
                RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
-                       (int)(dev->data->port_id), (unsigned)link.link_speed,
+                       dev->data->port_id, link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX
                        ? "full-duplex" : "half-duplex");
        else
                RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
-                       (int)(dev->data->port_id));
+                       dev->data->port_id);
 
        RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
                pci_dev->addr.domain, pci_dev->addr.bus,
@@ -1559,7 +1564,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* Saving physical and virtual addresses for the RX ring */
-       rxq->dma = (uint64_t)tz->phys_addr;
+       rxq->dma = (uint64_t)tz->iova;
        rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
 
        /* mbuf pointers array for referencing mbufs linked to RX descriptors */
@@ -1662,7 +1667,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        "tx_free_thresh must be less than the number of TX "
                        "descriptors. (tx_free_thresh=%u port=%d "
                        "queue=%d)\n", (unsigned int)tx_free_thresh,
-                       (int)dev->data->port_id, (int)queue_idx);
+                       dev->data->port_id, (int)queue_idx);
                return -(EINVAL);
        }
 
@@ -1715,7 +1720,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        txq->txq_flags = tx_conf->txq_flags;
 
        /* Saving physical and virtual addresses for the TX ring */
-       txq->dma = (uint64_t)tz->phys_addr;
+       txq->dma = (uint64_t)tz->iova;
        txq->txds = (struct nfp_net_tx_desc *)tz->addr;
 
        /* mbuf pointers array for referencing mbufs linked to TX descriptors */
@@ -2001,9 +2006,9 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 */
                new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
                if (unlikely(new_mb == NULL)) {
-                       RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
-                               "queue_id=%u\n", (unsigned)rxq->port_id,
-                               (unsigned)rxq->qidx);
+                       RTE_LOG_DP(DEBUG, PMD,
+                                  "RX mbuf alloc failed port_id=%u queue_id=%u\n",
+                                  rxq->port_id, (unsigned int)rxq->qidx);
                        nfp_net_mbuf_alloc_failed(rxq);
                        break;
                }
@@ -2064,7 +2069,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
                    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
                        mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-                       mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+                       mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                }
 
                /* Adding the mbuff to the mbuff array passed by the app */
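
PKT_RX_VLAN_PKT is renamed PKT_RX_VLAN in this release. A minimal consumer
sketch of the flag pair set above; rx_vlan_tci() is illustrative, not part
of this patch:

    #include <rte_mbuf.h>

    /* Return the stripped VLAN TCI, or 0 when none is present. */
    static inline uint16_t
    rx_vlan_tci(const struct rte_mbuf *mb)
    {
            const uint64_t f = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;

            /* PKT_RX_VLAN says vlan_tci is valid; PKT_RX_VLAN_STRIPPED
             * says the tag was removed from the packet data, which is
             * what nfp_net_recv_pkts() signals above. */
            return ((mb->ol_flags & f) == f) ? mb->vlan_tci : 0;
    }
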
@@ -2087,7 +2092,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                return nb_hold;
 
        PMD_RX_LOG(DEBUG, "RX  port_id=%u queue_id=%u, %d packets received\n",
-                  (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);
+                  rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
 
        nb_hold += rxq->nb_rx_hold;
 
@@ -2098,7 +2103,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        rte_wmb();
        if (nb_hold > rxq->rx_free_thresh) {
                PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
-                          (unsigned)rxq->port_id, (unsigned)rxq->qidx,
+                          rxq->port_id, (unsigned int)rxq->qidx,
                           (unsigned)nb_hold, (unsigned)avail);
                nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
                nb_hold = 0;
@@ -2263,7 +2268,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        *lmbuf = pkt;
 
                        dma_size = pkt->data_len;
-                       dma_addr = rte_mbuf_data_dma_addr(pkt);
+                       dma_addr = rte_mbuf_data_iova(pkt);
                        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
                                   "%" PRIx64 "\n", dma_addr);
 
@@ -2303,11 +2308,12 @@ xmit_end:
        return i;
 }
 
-static void
+static int
 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        uint32_t new_ctrl, update;
        struct nfp_net_hw *hw;
+       int ret;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        new_ctrl = 0;
@@ -2328,14 +2334,15 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 
        if (new_ctrl == 0)
-               return;
+               return 0;
 
        update = NFP_NET_CFG_UPDATE_GEN;
 
-       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-               return;
+       ret = nfp_net_reconfig(hw, new_ctrl, update);
+       if (!ret)
+               hw->ctrl = new_ctrl;
 
-       hw->ctrl = new_ctrl;
+       return ret;
 }
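
With vlan_offload_set returning int, the nfp_net_reconfig() error now
reaches applications through the ethdev API. A caller sketch, assuming the
17.11 rte_eth_dev_set_vlan_offload() semantics; enable_vlan_strip() is
illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    enable_vlan_strip(uint16_t port_id)
    {
            /* The PMD callback's return code propagates out of the
             * ethdev API instead of being silently dropped. */
            int ret = rte_eth_dev_set_vlan_offload(port_id,
                                                   ETH_VLAN_STRIP_OFFLOAD);

            if (ret != 0)
                    printf("VLAN strip setup failed: %d\n", ret);
            return ret;
    }
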
 
 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
@@ -2665,9 +2672,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       /* hotplug is not possible with multiport PF */
-       if (!hw->pf_multiport_enabled)
-               eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -2810,10 +2814,12 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                return -ENOMEM;
        }
 
-       if (hw->is_pf)
+       if (hw->is_pf) {
                nfp_net_pf_read_mac(hwport0, port);
-       else
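+               /* Program the MAC just read from the NFP into the port */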
+               nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+       } else {
                nfp_net_vf_read_mac(hw);
+       }
 
        if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
                /* Using random mac addresses for VFs */
@@ -3011,6 +3017,22 @@ static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
 {
+       struct rte_eth_dev *eth_dev;
+       struct nfp_net_hw *hw, *hwport0;
+       int port = 0;
+
+       eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
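+       /* A PF can expose several ports on one PCI device; find this port's hw */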
+       if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
+           (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
+               port = get_pf_port_number(eth_dev->data->name);
+               hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               hw = &hwport0[port];
+       } else {
+               hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       }
+       /* hotplug is not possible with multiport PF */
+       if (hw->pf_multiport_enabled)
+               return -ENOTSUP;
        return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
 }
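
With RTE_ETH_DEV_DETACHABLE removed from ethdev, the restriction moves into
the driver's remove callback: a multiport PF now refuses to detach with
-ENOTSUP. A caller sketch, assuming the 17.11 rte_eth_dev_detach()
signature; detach_port() is illustrative:

    #include <rte_ethdev.h>

    static int
    detach_port(uint16_t port_id)
    {
            char devname[RTE_ETH_NAME_MAX_LEN];

            /* Ends up in eth_nfp_pci_remove() via the PCI bus unplug
             * path; a multiport PF port fails with -ENOTSUP instead of
             * tearing down state shared with its sibling ports. */
            return rte_eth_dev_detach(port_id, devname);
    }
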