net/nfp: handle packets with length 0 as usual ones
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index eda87a5..a76dbd4 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -39,8 +39,6 @@
  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
  */
 
-#include <math.h>
-
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_log.h>
@@ -605,6 +603,20 @@ static void nfp_net_read_mac(struct nfp_net_hw *hw)
        memcpy(&hw->mac_addr[4], &tmp, 2);
 }
 
+static void
+nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
+{
+       uint32_t mac0 = *(uint32_t *)mac;
+       uint16_t mac1;
+
+       nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
+
+       mac += 4;
+       mac1 = *(uint16_t *)mac;
+       nn_writew(rte_cpu_to_be_16(mac1),
+                 hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
+}
+
 static int
 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
@@ -629,14 +641,19 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
                /* UIO just supports one queue and no LSC*/
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
+               intr_handle->intr_vec[0] = 0;
        } else {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
-               for (i = 0; i < dev->data->nb_rx_queues; i++)
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        /*
                         * The first msix vector is reserved for non
                         * efd interrupts
                        */
                        nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
+                       intr_handle->intr_vec[i] = i + 1;
+                       PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
+                                           intr_handle->intr_vec[i]);
+               }
        }
 
        /* Avoiding TX interrupts */
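
With UIO only a single interrupt vector is available, so RX queue 0 is mapped to vector 0 and link-state-change interrupts cannot be used; with VFIO, vector 0 stays reserved for the non-EFD (miscellaneous/LSC) interrupt and RX queue i is mapped to vector i + 1. The new intr_vec[] assignments record that mapping so the ethdev RX-interrupt API can find the right event fd. A minimal sketch of the application side, assuming the standard rte_ethdev RX-interrupt calls (port_id, queue_id and the helper name are placeholders, not part of this patch):

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    /* Sketch: wait up to timeout_ms for the RX interrupt of one queue. */
    static int
    wait_for_rx(uint16_t port_id, uint16_t queue_id, int timeout_ms)
    {
            struct rte_epoll_event ev;
            int n;

            /* Arm the queue interrupt and register it with this thread's epoll set */
            rte_eth_dev_rx_intr_enable(port_id, queue_id);
            rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                                      RTE_INTR_EVENT_ADD, NULL);

            /* Blocks until the vector mapped to this queue fires */
            n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, timeout_ms);

            /* Disarm before going back to polling the ring */
            rte_eth_dev_rx_intr_disable(port_id, queue_id);
            return n;
    }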
@@ -647,7 +664,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
-       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
@@ -686,20 +703,17 @@ nfp_net_start(struct rte_eth_dev *dev)
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
-       }
 
-       if (rte_intr_dp_is_en(intr_handle))
                nfp_configure_rx_interrupt(dev, intr_handle);
+               update = NFP_NET_CFG_UPDATE_MSIX;
+       }
 
        rte_intr_enable(intr_handle);
 
        /* Enable device */
        new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
-       update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
-       /* Just configuring queues interrupts when necessary */
-       if (rte_intr_dp_is_en(intr_handle))
-               update |= NFP_NET_CFG_UPDATE_MSIX;
+       update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
@@ -772,7 +786,7 @@ nfp_net_close(struct rte_eth_dev *dev)
        PMD_INIT_LOG(DEBUG, "Close");
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        /*
         * We assume that the DPDK application is stopping all the
@@ -1081,7 +1095,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1173,7 +1187,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
         * Other PMDs are just checking the DD bit in intervals of 4
         * descriptors and counting all four if the first has the DD
         * bit on. Of course, this is not accurate but can be good for
-        * perfomance. But ideally that should be done in descriptors
+        * performance. But ideally that should be done in descriptors
         * chunks belonging to the same cache line
         */
 
@@ -1201,7 +1215,7 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        int base = 0;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;
@@ -1221,7 +1235,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        int base = 0;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;
@@ -1235,7 +1249,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 static void
 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 {
-       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;
 
        memset(&link, 0, sizeof(link));
@@ -1269,7 +1283,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
        struct rte_pci_device *pci_dev;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
                /* If MSI-X auto-masking is used, clear the entry */
@@ -1334,7 +1348,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
        nfp_net_link_update(dev, 0);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
 
        nfp_net_dev_link_status_print(dev);
 
@@ -1473,7 +1487,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         * of descriptors in log2 format
         */
        nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc));
+       nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
        return 0;
 }
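
The ring size register takes the descriptor count in log2 form; rte_log2_u32() from rte_common.h computes this on integers, so the earlier libm log2() call and the <math.h> include removed at the top of the file are no longer needed. A standalone sketch of the value being programmed, assuming the usual power-of-two ring sizes:

    #include <stdio.h>

    /* log2 of a power-of-two ring size; stands in for rte_log2_u32() here. */
    static unsigned int
    log2_pow2(unsigned int v)
    {
            unsigned int n = 0;

            while (v > 1) {         /* v is assumed to be a power of two */
                    v >>= 1;
                    n++;
            }
            return n;
    }

    int main(void)
    {
            printf("nb_desc=4096 -> %u\n", log2_pow2(4096)); /* prints 12 */
            return 0;
    }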
@@ -1628,7 +1642,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
         * of descriptors in log2 format
         */
        nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc));
+       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
        return 0;
 }
@@ -2096,18 +2110,20 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 */
                pkt_size = pkt->pkt_len;
 
-               /* Releasing mbuf which was prefetched above */
-               if (*lmbuf)
-                       rte_pktmbuf_free(*lmbuf);
-               /*
-                * Linking mbuf with descriptor for being released
-                * next time descriptor is used
-                */
-               *lmbuf = pkt;
-
-               while (pkt_size) {
+               while (pkt) {
                        /* Copying TSO, VLAN and cksum info */
                        *txds = txd;
+
+                       /* Releasing mbuf used by this descriptor previously*/
+                       if (*lmbuf)
+                               rte_pktmbuf_free_seg(*lmbuf);
+
+                       /*
+                        * Linking mbuf with descriptor for being released
+                        * next time descriptor is used
+                        */
+                       *lmbuf = pkt;
+
                        dma_size = pkt->data_len;
                        dma_addr = rte_mbuf_data_dma_addr(pkt);
                        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -2126,15 +2142,16 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                txq->wr_p = 0;
 
                        pkt_size -= dma_size;
-                       if (!pkt_size) {
+                       if (!pkt_size)
                                /* End of packet */
                                txds->offset_eop |= PCIE_DESC_TX_EOP;
-                       } else {
+                       else
                                txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
-                               pkt = pkt->next;
-                       }
+
+                       pkt = pkt->next;
                        /* Referencing next free TX descriptor */
                        txds = &txq->txds[txq->wr_p];
+                       lmbuf = &txq->txbufs[txq->wr_p].mbuf;
                        issued_descs++;
                }
                i++;
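
This is the change the subject line refers to. Previously the mbuf was freed and relinked once, before a loop driven by pkt_size, so a packet with pkt_len == 0 produced no TX descriptor at all (the loop body never ran and EOP was never set), and for multi-segment packets only the first slot tracked an mbuf. Driving the loop by the segment chain instead issues one descriptor per segment, links and frees mbufs per slot with rte_pktmbuf_free_seg(), and handles zero-length packets like any other. A standalone sketch of the behavioural difference, using a hypothetical seg type rather than rte_mbuf:

    #include <stdio.h>

    struct seg { unsigned int data_len; struct seg *next; };

    static int
    count_descs(struct seg *pkt, unsigned int pkt_len, int loop_on_segments)
    {
            int descs = 0;

            if (loop_on_segments) {
                    while (pkt) {           /* new: one descriptor per segment */
                            descs++;
                            pkt = pkt->next;
                    }
            } else {
                    while (pkt_len) {       /* old: skips pkt_len == 0 entirely */
                            descs++;
                            pkt_len -= pkt->data_len;
                            pkt = pkt->next;
                    }
            }
            return descs;
    }

    int main(void)
    {
            struct seg zero = { 0, NULL };

            printf("old: %d descs, new: %d descs\n",
                   count_descs(&zero, 0, 0), count_descs(&zero, 0, 1));
            return 0;
    }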
@@ -2232,7 +2249,8 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
                                reta &= ~(0xFF << (8 * j));
                        reta |= reta_conf[idx].reta[shift + j] << (8 * j);
                }
-               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta);
+               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
+                             reta);
        }
 
        update = NFP_NET_CFG_UPDATE_RSS;
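
Each reta_conf[idx] group describes RTE_RETA_GROUP_SIZE (64) redirection-table entries and the NFP stores one entry per byte, so group idx starts at byte offset idx * 64 inside NFP_NET_CFG_RSS_ITBL. The old offset used only shift, so every group beyond the first overwrote group 0's entries; the query path below gets the same correction. A standalone sketch of the offsets involved (the constant stands in for the DPDK/NFP macros):

    #include <stdio.h>

    #define GROUP_SIZE 64   /* stands in for RTE_RETA_GROUP_SIZE */

    int main(void)
    {
            unsigned int i = 68;                    /* table entry index */
            unsigned int idx = i / GROUP_SIZE;      /* reta_conf group -> 1 */
            unsigned int shift = i % GROUP_SIZE;    /* offset in group -> 4 */

            printf("old offset: %u (aliases entries 4..7)\n", shift);
            printf("new offset: %u (entries 68..71)\n", idx * GROUP_SIZE + shift);
            return 0;
    }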
@@ -2279,7 +2297,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
                if (!mask)
                        continue;
 
-               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift);
+               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
+                                   shift);
                for (j = 0; j < 4; j++) {
                        if (!(mask & (0x1 << j)))
                                continue;
@@ -2329,6 +2348,9 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                                NFP_NET_CFG_RSS_IPV6_TCP |
                                NFP_NET_CFG_RSS_IPV6_UDP;
 
+       cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
+       cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
+
        /* configuring where to apply the RSS hash */
        nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
 
@@ -2447,7 +2469,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
-       pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
@@ -2538,9 +2560,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 
        nfp_net_read_mac(hw);
 
-       if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
+       if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
                /* Using random mac addresses for VFs */
                eth_random_addr(&hw->mac_addr[0]);
+               nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+       }
 
        /* Copying mac address to DPDK eth_dev struct */
        ether_addr_copy((struct ether_addr *)hw->mac_addr,
@@ -2602,7 +2626,7 @@ static struct rte_pci_driver rte_nfp_net_pmd = {
 
 RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio-pci");
 
 /*
  * Local variables: