net/nfp: handle packets with length 0 as usual ones
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 92b03c4..a76dbd4 100644
@@ -603,6 +603,20 @@ static void nfp_net_read_mac(struct nfp_net_hw *hw)
        memcpy(&hw->mac_addr[4], &tmp, 2);
 }
 
+static void
+nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
+{
+       uint32_t mac0 = *(uint32_t *)mac;
+       uint16_t mac1;
+
+       nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
+
+       mac += 4;
+       mac1 = *(uint16_t *)mac;
+       nn_writew(rte_cpu_to_be_16(mac1),
+                 hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
+}
+
 static int
 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
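
The new nfp_net_write_mac() mirrors nfp_net_read_mac() just above it: the
6-byte address goes into the control BAR as one 32-bit and one 16-bit
big-endian word. Below is a minimal standalone sketch of that split, assuming
a little-endian host and an illustrative address; GCC/Clang byte-swap builtins
stand in for rte_cpu_to_be_32/16(), so this is not driver code:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t mac[6] = {0x00, 0x15, 0x4d, 0x12, 0x34, 0x56};
	uint32_t mac0;
	uint16_t mac1;

	memcpy(&mac0, mac, 4);     /* bytes 0..3, as *(uint32_t *)mac above */
	memcpy(&mac1, mac + 4, 2); /* bytes 4..5, as *(uint16_t *)mac above */

	/* The byte swaps put byte 0 of the MAC into the most significant
	 * byte of each word, matching what the driver writes to the BAR. */
	printf("mac0=0x%08x mac1=0x%04x\n",
	       __builtin_bswap32(mac0), (unsigned)__builtin_bswap16(mac1));
	return 0;
}
```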
@@ -627,14 +641,19 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
                /* UIO supports just one queue and no LSC */
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
+               intr_handle->intr_vec[0] = 0;
        } else {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
-               for (i = 0; i < dev->data->nb_rx_queues; i++)
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        /*
                         * The first MSI-X vector is reserved for
                         * non-EFD interrupts.
                         */
                        nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
+                       intr_handle->intr_vec[i] = i + 1;
+                       PMD_INIT_LOG(DEBUG, "intr_vec[%d] = %d", i,
+                                           intr_handle->intr_vec[i]);
+               }
        }
 
        /* Avoiding TX interrupts */
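
The added intr_vec[] entries record which vector serves each RX queue, which
is what the ethdev RX-interrupt plumbing later consults. A hedged restatement
of the mapping rule as a hypothetical helper (not part of the patch):

```c
/* With UIO a single vector serves queue 0; with VFIO, RX queue i uses
 * MSI-X vector i + 1, since vector 0 stays reserved for non-EFD events. */
int nfp_rx_queue_vector(int queue_id, int using_uio)
{
	return using_uio ? 0 : queue_id + 1;
}
```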
@@ -684,20 +703,17 @@ nfp_net_start(struct rte_eth_dev *dev)
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
-       }
 
-       if (rte_intr_dp_is_en(intr_handle))
                nfp_configure_rx_interrupt(dev, intr_handle);
+               update = NFP_NET_CFG_UPDATE_MSIX;
+       }
 
        rte_intr_enable(intr_handle);
 
        /* Enable device */
        new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
-       update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
-       /* Just configuring queues interrupts when necessary */
-       if (rte_intr_dp_is_en(intr_handle))
-               update |= NFP_NET_CFG_UPDATE_MSIX;
+       update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
@@ -2094,7 +2110,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 */
                pkt_size = pkt->pkt_len;
 
-               while (pkt_size) {
+               while (pkt) {
                        /* Copying TSO, VLAN and cksum info */
                        *txds = txd;
 
@@ -2126,13 +2142,13 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                txq->wr_p = 0;
 
                        pkt_size -= dma_size;
-                       if (!pkt_size) {
+                       if (!pkt_size)
                                /* End of packet */
                                txds->offset_eop |= PCIE_DESC_TX_EOP;
-                       } else {
+                       else
                                txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
-                               pkt = pkt->next;
-                       }
+
+                       pkt = pkt->next;
                        /* Referencing next free TX descriptor */
                        txds = &txq->txds[txq->wr_p];
                        lmbuf = &txq->txbufs[txq->wr_p].mbuf;
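
Keying the loop on the segment pointer instead of the remaining byte count is
the substance of this change: with while (pkt_size), a packet whose pkt_len is
0 never entered the loop, so no descriptor and no EOP bit were ever emitted
for it. A simplified, compilable sketch of the reworked walk, using toy types
rather than the driver's:

```c
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for an mbuf chain: one node per segment. */
struct seg {
	struct seg *next;
	unsigned int len;
};

static void emit_descriptors(struct seg *pkt, unsigned int pkt_size)
{
	while (pkt) {                   /* was: while (pkt_size) */
		pkt_size -= pkt->len;
		if (!pkt_size)
			printf("descriptor with EOP\n");
		else
			printf("descriptor, more segments follow\n");
		pkt = pkt->next;
	}
}

int main(void)
{
	struct seg empty = { NULL, 0 };  /* zero-length packet */

	/* Prints one EOP descriptor; the old loop emitted nothing. */
	emit_descriptors(&empty, 0);
	return 0;
}
```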
@@ -2233,7 +2249,8 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
                                reta &= ~(0xFF << (8 * j));
                        reta |= reta_conf[idx].reta[shift + j] << (8 * j);
                }
-               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta);
+               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
+                             reta);
        }
 
        update = NFP_NET_CFG_UPDATE_RSS;
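
The added idx * 64 term is the actual fix: DPDK hands the indirection table
over in groups of RTE_RETA_GROUP_SIZE (64) entries, and shift only locates the
32-bit word inside the current group, so without the group offset the second
half of the 128-entry table overwrote the first. A worked example of the
offset arithmetic (entry number chosen for illustration):

```c
#include <stdio.h>

int main(void)
{
	/* RETA entry 70 is handled by loop iteration i = 68, since
	 * entries 68..71 share one 32-bit configuration word. */
	unsigned int i     = 68;
	unsigned int idx   = i / 64;   /* group 1 (RTE_RETA_GROUP_SIZE) */
	unsigned int shift = i % 64;   /* word offset 4 within the group */

	printf("NFP_NET_CFG_RSS_ITBL + %u\n", idx * 64 + shift); /* +68 */
	return 0;
}
```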
@@ -2280,7 +2297,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
                if (!mask)
                        continue;
 
-               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift);
+               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
+                                   shift);
                for (j = 0; j < 4; j++) {
                        if (!(mask & (0x1 << j)))
                                continue;
@@ -2330,6 +2348,9 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                                NFP_NET_CFG_RSS_IPV6_TCP |
                                NFP_NET_CFG_RSS_IPV6_UDP;
 
+       cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
+       cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
+
        /* Configuring where to apply the RSS hash */
        nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
 
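
Previously only the protocol-selection bits were written, leaving the rest of
NFP_NET_CFG_RSS_CTRL zeroed; the two added lines also set the
indirection-table mask and select the Toeplitz hash function. A hedged sketch
of how those pieces fit together downstream (illustrative only, not firmware
code):

```c
#include <stdint.h>

#define NFP_RSS_TABLE_SIZE 128            /* indirection table entries */

uint8_t reta[NFP_RSS_TABLE_SIZE];         /* filled by nfp_net_reta_update() */

/* The Toeplitz hash of the selected headers indexes the indirection
 * table, bounded by the table mask, and the entry names the RX queue. */
uint8_t nfp_rss_pick_queue(uint32_t toeplitz_hash)
{
	return reta[toeplitz_hash % NFP_RSS_TABLE_SIZE];
}
```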
@@ -2539,9 +2560,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 
        nfp_net_read_mac(hw);
 
-       if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
+       if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
                /* Using random mac addresses for VFs */
                eth_random_addr(&hw->mac_addr[0]);
+               nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+       }
 
        /* Copying mac address to DPDK eth_dev struct */
        ether_addr_copy((struct ether_addr *)hw->mac_addr,