net/nfp: fix stats struct initial value
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 251a1c6..0501156 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -88,7 +88,7 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  uint16_t nb_desc, unsigned int socket_id,
                                  const struct rte_eth_txconf *tx_conf);
 static int nfp_net_start(struct rte_eth_dev *dev);
-static void nfp_net_stats_get(struct rte_eth_dev *dev,
+static int nfp_net_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats);
 static void nfp_net_stats_reset(struct rte_eth_dev *dev);
 static void nfp_net_stop(struct rte_eth_dev *dev);
@@ -106,7 +106,7 @@ static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #define NFP_QCP_MAX_ADD        0x7f
 
 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
-       (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+       (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
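Aside: buf_physaddr became buf_iova in the 17.11 IOVA rework; the arithmetic is untouched. A minimal sketch of what the macro computes, assuming mb comes from rte_pktmbuf_alloc() on an initialized mempool (mbuf_default_iova is a hypothetical helper, not driver code):

	static inline uint64_t
	mbuf_default_iova(const struct rte_mbuf *mb)
	{
		/* IO address of the buffer start plus the headroom the
		 * NIC must skip to land on the default data offset. */
		return (uint64_t)mb->buf_iova + RTE_PKTMBUF_HEADROOM;
	}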
 
 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
 enum nfp_qcp_ptr {
@@ -488,10 +488,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
-       /* Supporting VLAN insertion by default */
-       if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-               new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
-
        if (rxmode->jumbo_frame)
                /* this is handled in rte_eth_dev_configure */
 
@@ -505,6 +501,32 @@ nfp_net_configure(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
+       /* If the following capabilities are supported, enable them by default */
+
+       /* VLAN insertion */
+       if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
+               new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+       /* L2 broadcast */
+       if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+               new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+       /* L2 multicast */
+       if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+               new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+       /* TX checksum offload */
+       if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
+               new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+       /* LSO offload */
+       if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+               new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+
+       /* RX gather */
+       if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+               new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
        if (!new_ctrl)
                return 0;
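Past this hunk, nfp_net_configure() pushes the accumulated mask to the vNIC. A sketch of that tail, mirroring the nfp_net_vlan_offload_set() pattern visible later in this patch (the exact error code is an assumption):

	update = NFP_NET_CFG_UPDATE_GEN;
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;	/* firmware rejected the control word */

	hw->ctrl = new_ctrl;	/* cache what the device now runs with */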
 
@@ -593,7 +615,55 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
        hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 }
 
-static void nfp_net_read_mac(struct nfp_net_hw *hw)
+#define ETH_ADDR_LEN   6
+
+static void
+nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+{
+       int i;
+
+       for (i = 0; i < ETH_ADDR_LEN; i++)
+               dst[ETH_ADDR_LEN - i - 1] = src[i];
+}
+
+static int
+nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
+{
+       union eth_table_entry *entry;
+       int idx, i;
+
+       idx = port;
+       entry = hw->eth_table;
+
+       /* Scan the NFP ethernet table obtained earlier */
+       for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
+               if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
+                       /* port not in use */
+                       entry++;
+                       continue;
+               }
+               if (idx == 0)
+                       break;
+               idx--;
+               entry++;
+       }
+
+       if (i == NSP_ETH_MAX_COUNT)
+               return -EINVAL;
+
+       /*
+        * hw points to the port0 private data. Advance it so it points
+        * to the right port.
+        */
+       hw += port;
+       nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr,
+                                (uint8_t *)&entry->mac_addr);
+
+       return 0;
+}
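The NSP ethernet table stores the MAC with its bytes reversed, hence the helper above walking src forward and dst backward. An illustration with made-up values:

	const uint8_t nsp_stored[ETH_ADDR_LEN] = {
		0x56, 0x34, 0x12, 0x4d, 0x15, 0x00	/* as held by the table */
	};
	uint8_t mac[ETH_ADDR_LEN];

	nfp_eth_copy_mac_reverse(mac, nsp_stored);
	/* mac now reads 00:15:4d:12:34:56, most significant byte first */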
+
+static void
+nfp_net_vf_read_mac(struct nfp_net_hw *hw)
 {
        uint32_t tmp;
 
@@ -940,17 +1010,10 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
        nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
                         NFP_NET_CFG_STS_LINK_RATE_MASK;
 
-       if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
-           ((NFD_CFG_MINOR_VERSION_of(hw->ver) == 4) &&
-           (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0)))
-               /* We really do not know the speed wil old firmware */
+       if (nn_link_status >= RTE_DIM(ls_to_ethtool))
                link.link_speed = ETH_SPEED_NUM_NONE;
-       else {
-               if (nn_link_status >= RTE_DIM(ls_to_ethtool))
-                       link.link_speed = ETH_SPEED_NUM_NONE;
-               else
-                       link.link_speed = ls_to_ethtool[nn_link_status];
-       }
+       else
+               link.link_speed = ls_to_ethtool[nn_link_status];
 
        if (old.link_status != link.link_status) {
                nfp_net_dev_atomic_write_link_status(dev, &link);
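The old-firmware special case is gone: link speed now comes from one bounds-checked lookup. The ls_to_ethtool table defined earlier in the file maps NFP link-rate codes to ethdev speeds, roughly like this (a sketch; the exact NFP_NET_CFG_STS_LINK_RATE_* entries are quoted from memory):

	static const uint32_t ls_to_ethtool[] = {
		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
	};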
@@ -964,7 +1027,7 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
        return -1;
 }
 
-static void
+static int
 nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        int i;
@@ -975,6 +1038,8 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
 
+       memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
        /* reading per RX ring stats */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
@@ -1050,8 +1115,11 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
 
-       if (stats)
+       if (stats) {
                memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+               return 0;
+       }
+       return -EINVAL;
 }
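Two changes land in this function: the memset is the fix named in the subject line (without it, counters for rings beyond nb_rx_queues/nb_tx_queues copied stack garbage out to the caller), and the int return matches the 17.11 ethdev op signature. A caller sketch, port_id assumed valid:

	struct rte_eth_stats stats;

	/* rte_eth_stats_get() now propagates the PMD's return code */
	if (rte_eth_stats_get(port_id, &stats) != 0)
		printf("port %u: stats unavailable\n", port_id);
	else
		printf("port %u: %" PRIu64 " packets received\n",
		       port_id, stats.ipackets);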
 
 static void
@@ -1170,6 +1238,11 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                             ETH_TXQ_FLAGS_NOOFFLOADS,
        };
 
+       dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
+                                          ETH_RSS_NONFRAG_IPV4_UDP |
+                                          ETH_RSS_NONFRAG_IPV6_TCP |
+                                          ETH_RSS_NONFRAG_IPV6_UDP;
+
        dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
        dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 
@@ -1285,12 +1358,12 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
        nfp_net_dev_atomic_read_link_status(dev, &link);
        if (link.link_status)
                RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
-                       (int)(dev->data->port_id), (unsigned)link.link_speed,
+                       dev->data->port_id, link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX
                        ? "full-duplex" : "half-duplex");
        else
                RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
-                       (int)(dev->data->port_id));
+                       dev->data->port_id);
 
        RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
                pci_dev->addr.domain, pci_dev->addr.bus,
@@ -1491,7 +1564,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* Saving physical and virtual addresses for the RX ring */
-       rxq->dma = (uint64_t)tz->phys_addr;
+       rxq->dma = (uint64_t)tz->iova;
        rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
 
        /* mbuf pointers array for referencing mbufs linked to RX descriptors */
@@ -1594,7 +1667,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        "tx_free_thresh must be less than the number of TX "
                        "descriptors. (tx_free_thresh=%u port=%d "
                        "queue=%d)\n", (unsigned int)tx_free_thresh,
-                       (int)dev->data->port_id, (int)queue_idx);
+                       dev->data->port_id, (int)queue_idx);
                return -(EINVAL);
        }
 
@@ -1647,7 +1720,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        txq->txq_flags = tx_conf->txq_flags;
 
        /* Saving physical and virtual addresses for the TX ring */
-       txq->dma = (uint64_t)tz->phys_addr;
+       txq->dma = (uint64_t)tz->iova;
        txq->txds = (struct nfp_net_tx_desc *)tz->addr;
 
        /* mbuf pointers array for referencing mbufs linked to TX descriptors */
@@ -1933,9 +2006,9 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 */
                new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
                if (unlikely(new_mb == NULL)) {
-                       RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
-                               "queue_id=%u\n", (unsigned)rxq->port_id,
-                               (unsigned)rxq->qidx);
+                       RTE_LOG_DP(DEBUG, PMD,
+                               "RX mbuf alloc failed port_id=%u queue_id=%u\n",
+                               rxq->port_id, (unsigned int)rxq->qidx);
                        nfp_net_mbuf_alloc_failed(rxq);
                        break;
                }
@@ -1996,7 +2069,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
                    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
                        mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-                       mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+                       mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                }
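PKT_RX_VLAN_PKT was renamed to PKT_RX_VLAN in 17.11; paired with PKT_RX_VLAN_STRIPPED it tells the application the tag was removed from the packet data and parked in vlan_tci. Application-side consumption looks roughly like this (process_vlan is hypothetical):

	if ((mb->ol_flags & (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED)) ==
	    (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED))
		process_vlan(mb->vlan_tci & 0x0fff);	/* 12-bit VLAN ID */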
 
                /* Adding the mbuf to the mbuf array passed by the app */
@@ -2019,7 +2092,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                return nb_hold;
 
        PMD_RX_LOG(DEBUG, "RX  port_id=%u queue_id=%u, %d packets received\n",
-                  (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);
+                  rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
 
        nb_hold += rxq->nb_rx_hold;
 
@@ -2030,7 +2103,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        rte_wmb();
        if (nb_hold > rxq->rx_free_thresh) {
                PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
-                          (unsigned)rxq->port_id, (unsigned)rxq->qidx,
+                          rxq->port_id, (unsigned int)rxq->qidx,
                           (unsigned)nb_hold, (unsigned)avail);
                nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
                nb_hold = 0;
@@ -2195,7 +2268,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        *lmbuf = pkt;
 
                        dma_size = pkt->data_len;
-                       dma_addr = rte_mbuf_data_dma_addr(pkt);
+                       dma_addr = rte_mbuf_data_iova(pkt);
                        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
                                   "%" PRIx64 "\n", dma_addr);
 
@@ -2235,11 +2308,12 @@ xmit_end:
        return i;
 }
 
-static void
+static int
 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        uint32_t new_ctrl, update;
        struct nfp_net_hw *hw;
+       int ret;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        new_ctrl = 0;
@@ -2260,14 +2334,15 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 
        if (new_ctrl == 0)
-               return;
+               return 0;
 
        update = NFP_NET_CFG_UPDATE_GEN;
 
-       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-               return;
+       ret = nfp_net_reconfig(hw, new_ctrl, update);
+       if (!ret)
+               hw->ctrl = new_ctrl;
 
-       hw->ctrl = new_ctrl;
+       return ret;
 }
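With the void-to-int conversion the ethdev layer can finally surface a failed reconfig. An application sketch using the public get/modify/set shape (the same one testpmd uses):

	int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;	/* enable RX VLAN stripping */
	if (rte_eth_dev_set_vlan_offload(port_id, vlan_offload) != 0)
		printf("port %u: VLAN offload reconfig failed\n", port_id);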
 
 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
@@ -2597,9 +2672,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                return 0;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       /* hotplug is not possible with multiport PF */
-       if (!hw->pf_multiport_enabled)
-               eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -2676,6 +2748,10 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 
                /* vNIC PF tx/rx BARs are a subset of PF PCI device */
                hwport0->hw_queues += bar_offset;
+
+               /* Let's seize the chance to read the eth table from hw */
+               if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table))
+                       return -ENODEV;
        }
 
        if (hw->is_pf) {
@@ -2707,8 +2783,10 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 
        PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
                     hw->ver, hw->max_mtu);
-       PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
+       PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
                     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
+                    hw->cap & NFP_NET_CFG_CTRL_L2BC    ? "L2BCFILT " : "",
+                    hw->cap & NFP_NET_CFG_CTRL_L2MC    ? "L2MCFILT " : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXCSUM  ? "RXCSUM "  : "",
                     hw->cap & NFP_NET_CFG_CTRL_TXCSUM  ? "TXCSUM "  : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXVLAN  ? "RXVLAN "  : "",
@@ -2736,7 +2814,12 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                return -ENOMEM;
        }
 
-       nfp_net_read_mac(hw);
+       if (hw->is_pf) {
+               nfp_net_pf_read_mac(hwport0, port);
+               nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+       } else {
+               nfp_net_vf_read_mac(hw);
+       }
 
        if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
                /* Using random mac addresses for VFs */
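The hunk cuts off inside this if block, but the fallback it introduces is the usual one; a sketch, assuming the eth_random_addr() helper from rte_ether.h:

		eth_random_addr((uint8_t *)&hw->mac_addr);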
@@ -2934,6 +3017,22 @@ static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
 {
+       struct rte_eth_dev *eth_dev;
+       struct nfp_net_hw *hw, *hwport0;
+       int port = 0;
+
+       eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+       if (eth_dev == NULL)
+               return 0; /* port was never allocated or already released */
+       if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
+           (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
+               port = get_pf_port_number(eth_dev->data->name);
+               hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               hw = &hwport0[port];
+       } else {
+               hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       }
+       /* hotplug is not possible with multiport PF */
+       if (hw->pf_multiport_enabled)
+               return -ENOTSUP;
        return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
 }
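A note on the remove path: returning -ENOTSUP from the hook is what vetoes hotplug detach for multiport PFs, before any per-port teardown starts. Single-port devices fall through to rte_eth_dev_pci_generic_remove(), whose NULL second argument, per the helper's definition in rte_ethdev_pci.h, requests no extra per-device uninit callback.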