net/ixgbe/base: add EEE support for some PHYs
[dpdk.git] / drivers / net / nfp / nfp_net.c
index faf725c..869c55c 100644 (file)
@@ -718,10 +718,12 @@ static void
 nfp_net_close(struct rte_eth_dev *dev)
 {
        struct nfp_net_hw *hw;
+       struct rte_pci_device *pci_dev;
 
        PMD_INIT_LOG(DEBUG, "Close\n");
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       pci_dev = RTE_DEV_TO_PCI(dev->device);
 
        /*
         * We assume that the DPDK application is stopping all the
@@ -730,11 +732,11 @@ nfp_net_close(struct rte_eth_dev *dev)
 
        nfp_net_stop(dev);
 
-       rte_intr_disable(&dev->pci_dev->intr_handle);
+       rte_intr_disable(&pci_dev->intr_handle);
        nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
 
        /* unregister callback func from eal lib */
-       rte_intr_callback_unregister(&dev->pci_dev->intr_handle,
+       rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     nfp_net_dev_interrupt_handler,
                                     (void *)dev);
 
@@ -816,6 +818,17 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
        struct rte_eth_link link, old;
        uint32_t nn_link_status;
 
+       static const uint32_t ls_to_ethtool[] = {
+               [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
+               [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
+               [NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
+               [NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
+               [NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
+               [NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
+               [NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
+               [NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
+       };
+
        PMD_DRV_LOG(DEBUG, "Link update\n");
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -831,8 +844,21 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
                link.link_status = ETH_LINK_UP;
 
        link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       /* Other cards can limit the tx and rx rate per VF */
-       link.link_speed = ETH_SPEED_NUM_40G;
+
+       nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
+                        NFP_NET_CFG_STS_LINK_RATE_MASK;
+
+       if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
+           ((NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4) &&
+           (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0)))
+               /* We really do not know the speed with old firmware */
+               link.link_speed = ETH_SPEED_NUM_NONE;
+       else {
+               if (nn_link_status >= RTE_DIM(ls_to_ethtool))
+                       link.link_speed = ETH_SPEED_NUM_NONE;
+               else
+                       link.link_speed = ls_to_ethtool[nn_link_status];
+       }
 
        if (old.link_status != link.link_status) {
                nfp_net_dev_atomic_write_link_status(dev, &link);
@@ -1006,7 +1032,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->driver_name = dev->driver->pci_drv.driver.name;
+       dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1027,8 +1053,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-                                            DEV_RX_OFFLOAD_UDP_CKSUM |
-                                            DEV_RX_OFFLOAD_TCP_CKSUM;
+                                            DEV_TX_OFFLOAD_UDP_CKSUM |
+                                            DEV_TX_OFFLOAD_TCP_CKSUM;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -1055,7 +1081,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
        dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 
-       dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+       dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+                              ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+                              ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
 }
 
 static const uint32_t *
@@ -1122,6 +1150,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 static void
 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 {
+       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
        struct rte_eth_link link;
 
        memset(&link, 0, sizeof(link));
@@ -1136,8 +1165,8 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
                        (int)(dev->data->port_id));
 
        RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
-               dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
-               dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+               pci_dev->addr.domain, pci_dev->addr.bus,
+               pci_dev->addr.devid, pci_dev->addr.function);
 }
 
 /* Interrupt configuration and handling */
@@ -1152,13 +1181,15 @@ static void
 nfp_net_irq_unmask(struct rte_eth_dev *dev)
 {
        struct nfp_net_hw *hw;
+       struct rte_pci_device *pci_dev;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       pci_dev = RTE_DEV_TO_PCI(dev->device);
 
        if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
                /* If MSI-X auto-masking is used, clear the entry */
                rte_wmb();
-               rte_intr_enable(&dev->pci_dev->intr_handle);
+               rte_intr_enable(&pci_dev->intr_handle);
        } else {
                /* Make sure all updates are written before un-masking */
                rte_wmb();
@@ -1219,7 +1250,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
        nfp_net_link_update(dev, 0);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 
        nfp_net_dev_link_status_print(dev);
 
@@ -1604,12 +1635,6 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
        hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
        hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
 
-       /*
-        * hash type is sharing the same word with input port info
-        * 31-8: input port
-        * 7:0: hash type
-        */
-       hash_type &= 0xff;
        mbuf->hash.rss = hash;
        mbuf->ol_flags |= PKT_RX_RSS_HASH;
 
@@ -1628,29 +1653,6 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
        }
 }
 
-/* nfp_net_check_port - Set mbuf in_port field */
-static void
-nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf)
-{
-       uint32_t port;
-
-       if (!(rxd->rxd.flags & PCIE_DESC_RX_INGRESS_PORT)) {
-               mbuf->port = 0;
-               return;
-       }
-
-       port = rte_be_to_cpu_32(*(uint32_t *)((uint8_t *)mbuf->buf_addr +
-                                             mbuf->data_off - 8));
-
-       /*
-        * hash type is sharing the same word with input port info
-        * 31-8: input port
-        * 7:0: hash type
-        */
-       port = (uint8_t)(port >> 8);
-       mbuf->port = port;
-}
-
 static inline void
 nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
 {
@@ -1707,7 +1709,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 * DPDK just checks the queue is lower than max queues
                 * enabled. But the queue needs to be configured
                 */
-               RTE_LOG(ERR, PMD, "RX Bad queue\n");
+               RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
                return -EINVAL;
        }
 
@@ -1720,7 +1722,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                rxb = &rxq->rxbufs[idx];
                if (unlikely(rxb == NULL)) {
-                       RTE_LOG(ERR, PMD, "rxb does not exist!\n");
+                       RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
                        break;
                }
 
@@ -1740,7 +1742,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 */
                new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
                if (unlikely(new_mb == NULL)) {
-                       RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
+                       RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
                                "queue_id=%u\n", (unsigned)rxq->port_id,
                                (unsigned)rxq->qidx);
                        nfp_net_mbuf_alloc_failed(rxq);
@@ -1771,7 +1773,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                         * responsibility of avoiding it. But we have
                         * to give some info about the error
                         */
-                       RTE_LOG(ERR, PMD,
+                       RTE_LOG_DP(ERR, PMD,
                                "mbuf overflow likely due to the RX offset.\n"
                                "\t\tYour mbuf size should have extra space for"
                                " RX offset=%u bytes.\n"
@@ -1800,9 +1802,6 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                /* Checking the checksum flag */
                nfp_net_rx_cksum(rxq, rxds, mb);
 
-               /* Checking the port flag */
-               nfp_net_check_port(rxds, mb);
-
                if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
                    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
                        mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
@@ -2330,8 +2329,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
-       pci_dev = eth_dev->pci_dev;
+       pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -2400,7 +2400,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                     hw->cap & NFP_NET_CFG_CTRL_LSO     ? "TSO "     : "",
                     hw->cap & NFP_NET_CFG_CTRL_RSS     ? "RSS "     : "");
 
-       pci_dev = eth_dev->pci_dev;
        hw->ctrl = 0;
 
        hw->stride_rx = stride;
@@ -2470,8 +2469,7 @@ static struct rte_pci_id pci_id_nfp_net_map[] = {
 static struct eth_driver rte_nfp_net_pmd = {
        .pci_drv = {
                .id_table = pci_id_nfp_net_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-                            RTE_PCI_DRV_DETACHABLE,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
                .probe = rte_eth_dev_pci_probe,
                .remove = rte_eth_dev_pci_remove,
        },
@@ -2481,6 +2479,7 @@ static struct eth_driver rte_nfp_net_pmd = {
 
 RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");
 
 /*
  * Local variables: