diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index d06b10a..92b03c4 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
  */
 
-#include <math.h>
-
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
 #include <rte_dev.h>
 #include <rte_ether.h>
 #include <rte_malloc.h>
@@ -646,7 +645,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
-       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
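
The RTE_ETH_DEV_TO_PCI() conversions throughout this patch rely on the rte_ethdev_pci.h header added to the includes above. In the 17.05-era tree it is a thin wrapper over the old expression, so each call site change is mechanical; a sketch for reference (the exact definition may differ in later releases):

    /* From rte_ethdev_pci.h, 17.05-era; shown for reference only */
    #define RTE_ETH_DEV_TO_PCI(eth_dev) \
            RTE_DEV_TO_PCI((eth_dev)->device)
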
@@ -771,7 +770,7 @@ nfp_net_close(struct rte_eth_dev *dev)
        PMD_INIT_LOG(DEBUG, "Close");
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        /*
         * We assume that the DPDK application is stopping all the
@@ -1080,7 +1079,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1172,7 +1171,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
         * Other PMDs are just checking the DD bit in intervals of 4
         * descriptors and counting all four if the first has the DD
         * bit on. Of course, this is not accurate but can be good for
-        * perfomance. But ideally that should be done in descriptors
+        * performance. But ideally that should be done in descriptors
         * chunks belonging to the same cache line
         */
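
The batched scheme the comment describes can be sketched as below. This is a hypothetical illustration, not driver code: the desc layout, the DESC_DD bit, and the function name are invented for the example. The idea is to probe the DD ("done") bit of the first descriptor in each group of four and credit the whole group, touching fewer descriptors at the cost of accuracy.

    #include <stdint.h>

    /* Hypothetical descriptor: only a "done" flag matters here */
    struct desc {
            uint32_t flags;
    };
    #define DESC_DD 0x1u /* assumed DD bit position */

    static uint32_t
    rx_queue_count_batched(const struct desc *ring, uint32_t head,
                           uint32_t ring_mask)
    {
            uint32_t n = 0;

            /* Probe descriptors head+0, head+4, ... and count 4 per hit */
            while (n <= ring_mask &&
                   (ring[(head + n) & ring_mask].flags & DESC_DD))
                    n += 4;

            return n;
    }
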
 
@@ -1200,7 +1199,7 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        int base = 0;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;
@@ -1220,7 +1219,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        int base = 0;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;
@@ -1234,7 +1233,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 static void
 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 {
-       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;
 
        memset(&link, 0, sizeof(link));
@@ -1268,7 +1267,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
        struct rte_pci_device *pci_dev;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
                /* If MSI-X auto-masking is used, clear the entry */
@@ -1333,7 +1332,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
        nfp_net_link_update(dev, 0);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
 
        nfp_net_dev_link_status_print(dev);
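
The extra NULL argument follows the 17.05 ethdev change that added a return-parameter to the callback-process API; the prototype at that point was roughly as follows (hedged; consult rte_ethdev.h of the target release):

    /* Roughly the 17.05-era prototype; ret_param lets a callback hand
     * data back to the caller. The LSC event needs none, hence NULL.
     */
    int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
                                      enum rte_eth_event_type event,
                                      void *cb_arg, void *ret_param);
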
 
@@ -1472,7 +1471,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         * of descriptors in log2 format
         */
        nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc));
+       nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
        return 0;
 }
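
Replacing libm's log2() with rte_log2_u32() keeps the ring-size encoding in pure integer math, which is also why the <math.h> include could be dropped at the top of the file. A minimal usage sketch (values are illustrative; nb_desc is expected to be a power of two, since the register stores only the exponent):

    #include <rte_common.h>

    static void
    ring_size_example(void)
    {
            /* 256 -> 8, 1024 -> 10. In the 17.05-era implementation a
             * non-power-of-two input is first rounded up to the next
             * power of two, so the result can overstate the ring size.
             */
            uint8_t ring_sz_log2 = rte_log2_u32(1024); /* 10 */
            (void)ring_sz_log2;
    }
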
@@ -1627,7 +1626,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
         * of descriptors in log2 format
         */
        nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc));
+       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
        return 0;
 }
@@ -2095,18 +2094,20 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 */
                pkt_size = pkt->pkt_len;
 
-               /* Releasing mbuf which was prefetched above */
-               if (*lmbuf)
-                       rte_pktmbuf_free(*lmbuf);
-               /*
-                * Linking mbuf with descriptor for being released
-                * next time descriptor is used
-                */
-               *lmbuf = pkt;
-
                while (pkt_size) {
                        /* Copying TSO, VLAN and cksum info */
                        *txds = txd;
+
+                       /* Release the mbuf previously used by this descriptor */
+                       if (*lmbuf)
+                               rte_pktmbuf_free_seg(*lmbuf);
+
+                       /*
+                        * Linking mbuf with descriptor for being released
+                        * next time descriptor is used
+                        */
+                       *lmbuf = pkt;
+
                        dma_size = pkt->data_len;
                        dma_addr = rte_mbuf_data_dma_addr(pkt);
                        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -2134,6 +2135,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        }
                        /* Referencing next free TX descriptor */
                        txds = &txq->txds[txq->wr_p];
+                       lmbuf = &txq->txbufs[txq->wr_p].mbuf;
                        issued_descs++;
                }
                i++;
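
The reshuffle above fixes multi-segment transmits: previously only the first descriptor's slot had its old mbuf freed and the new packet linked, so the slots used by the remaining segments kept stale pointers. Freeing and linking now happen once per descriptor, lmbuf advances together with txds, and rte_pktmbuf_free_seg() frees exactly one segment rather than a whole chain. A hypothetical sketch of the resulting ownership pattern (names invented for illustration):

    #include <rte_mbuf.h>

    /* Attach each segment of a possibly chained mbuf to its own
     * descriptor slot, freeing whatever segment the slot held from an
     * earlier transmit. One slot owns one segment, so the per-segment
     * free is the correct one.
     */
    static void
    attach_segments(struct rte_mbuf **slots, uint16_t nslots,
                    struct rte_mbuf *pkt)
    {
            uint16_t i = 0;
            struct rte_mbuf *seg;

            for (seg = pkt; seg != NULL && i < nslots; seg = seg->next, i++) {
                    if (slots[i] != NULL)
                            rte_pktmbuf_free_seg(slots[i]);
                    slots[i] = seg;
            }
    }
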
@@ -2446,7 +2448,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
-       pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
@@ -2580,20 +2582,28 @@ static const struct rte_pci_id pci_id_nfp_net_map[] = {
        },
 };
 
-static struct eth_driver rte_nfp_net_pmd = {
-       .pci_drv = {
-               .id_table = pci_id_nfp_net_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-               .probe = rte_eth_dev_pci_probe,
-               .remove = rte_eth_dev_pci_remove,
-       },
-       .eth_dev_init = nfp_net_init,
-       .dev_private_size = sizeof(struct nfp_net_adapter),
+static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+       struct rte_pci_device *pci_dev)
+{
+       return rte_eth_dev_pci_generic_probe(pci_dev,
+               sizeof(struct nfp_net_adapter), nfp_net_init);
+}
+
+static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
+{
+       return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nfp_net_pmd = {
+       .id_table = pci_id_nfp_net_map,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+       .probe = eth_nfp_pci_probe,
+       .remove = eth_nfp_pci_remove,
 };
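
This replaces the deprecated eth_driver wrapper with a plain rte_pci_driver whose probe/remove hooks call the new generic ethdev helpers. For orientation, here is a simplified sketch of what rte_eth_dev_pci_generic_probe() does, modeled on the 17.05-era rte_ethdev_pci.h (not the actual source; details may differ in later releases):

    #include <errno.h>
    #include <rte_ethdev_pci.h>

    /* Simplified sketch of the generic probe helper */
    static inline int
    generic_probe_sketch(struct rte_pci_device *pci_dev,
                         size_t private_data_size,
                         int (*dev_init)(struct rte_eth_dev *))
    {
            struct rte_eth_dev *eth_dev;
            int ret;

            /* Allocate the ethdev plus its private data area */
            eth_dev = rte_eth_dev_pci_allocate(pci_dev, private_data_size);
            if (eth_dev == NULL)
                    return -ENOMEM;

            /* Run the driver's init hook (nfp_net_init here) */
            ret = dev_init(eth_dev);
            if (ret != 0)
                    rte_eth_dev_pci_release(eth_dev);

            return ret;
    }

Passing NULL as the uninit callback in eth_nfp_pci_remove() means the generic remove helper just releases the ethdev, with no driver-specific teardown.
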
 
-RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio-pci");
 
 /*
  * Local variables: