X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=a76dbd48fa80d87b5ae8b351fe3a03b9379078da;hb=f78f0e64f772aa32bdca79a17835db8f2c57c1d9;hp=0ee7fb827549d2e302d150afce53e6c8a8341424;hpb=98a7ea332ba3da0f74ec951595d36a616165b255;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 0ee7fb8275..a76dbd48fa 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -39,8 +39,6 @@
  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
  */
 
-#include <math.h>
-
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_log.h>
@@ -605,6 +603,20 @@ static void nfp_net_read_mac(struct nfp_net_hw *hw)
 	memcpy(&hw->mac_addr[4], &tmp, 2);
 }
 
+static void
+nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
+{
+	uint32_t mac0 = *(uint32_t *)mac;
+	uint16_t mac1;
+
+	nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
+
+	mac += 4;
+	mac1 = *(uint16_t *)mac;
+	nn_writew(rte_cpu_to_be_16(mac1),
+		  hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
+}
+
 static int
 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 			   struct rte_intr_handle *intr_handle)
@@ -629,14 +641,19 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 		PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
 		/* UIO just supports one queue and no LSC*/
 		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
+		intr_handle->intr_vec[0] = 0;
 	} else {
 		PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
-		for (i = 0; i < dev->data->nb_rx_queues; i++)
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			/*
 			 * The first msix vector is reserved for non
 			 * efd interrupts
 			 */
 			nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
+			intr_handle->intr_vec[i] = i + 1;
+			PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
+				     intr_handle->intr_vec[i]);
+		}
 	}
 
 	/* Avoiding TX interrupts */
@@ -686,20 +703,17 @@ nfp_net_start(struct rte_eth_dev *dev)
 		intr_vector = dev->data->nb_rx_queues;
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -1;
-	}
 
-	if (rte_intr_dp_is_en(intr_handle))
 		nfp_configure_rx_interrupt(dev, intr_handle);
+		update = NFP_NET_CFG_UPDATE_MSIX;
+	}
 
 	rte_intr_enable(intr_handle);
 
 	/* Enable device */
 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
-	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
-	/* Just configuring queues interrupts when necessary */
-	if (rte_intr_dp_is_en(intr_handle))
-		update |= NFP_NET_CFG_UPDATE_MSIX;
+	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
 		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
@@ -1334,7 +1348,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
 	nfp_net_link_update(dev, 0);
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
 
 	nfp_net_dev_link_status_print(dev);
 
@@ -1473,7 +1487,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	 * of descriptors in log2 format
 	 */
 	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
-	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc));
+	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
 	return 0;
 }
@@ -1628,7 +1642,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	 * of descriptors in log2 format
 	 */
 	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc));
+	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
 	return 0;
 }
@@ -2096,7 +2110,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 */
 		pkt_size = pkt->pkt_len;
 
-		while (pkt_size) {
+		while (pkt) {
 			/* Copying TSO, VLAN and cksum info */
 			*txds = txd;
 
@@ -2128,13 +2142,13 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txq->wr_p = 0;
 
 			pkt_size -= dma_size;
-			if (!pkt_size) {
+			if (!pkt_size)
 				/* End of packet */
 				txds->offset_eop |= PCIE_DESC_TX_EOP;
-			} else {
+			else
 				txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
-				pkt = pkt->next;
-			}
+
+			pkt = pkt->next;
 
 			/* Referencing next free TX descriptor */
 			txds = &txq->txds[txq->wr_p];
 			lmbuf = &txq->txbufs[txq->wr_p].mbuf;
@@ -2235,7 +2249,8 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
 			reta &= ~(0xFF << (8 * j));
 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
 		}
-		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta);
+		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
+			      reta);
 	}
 
 	update = NFP_NET_CFG_UPDATE_RSS;
@@ -2282,7 +2297,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 		if (!mask)
 			continue;
 
-		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift);
+		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
+				    shift);
 		for (j = 0; j < 4; j++) {
 			if (!(mask & (0x1 << j)))
 				continue;
@@ -2332,6 +2348,9 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP |
 				NFP_NET_CFG_RSS_IPV6_UDP;
 
+	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
+	cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
+
 	/* configuring where to apply the RSS hash */
 	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
 
@@ -2541,9 +2560,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 
 	nfp_net_read_mac(hw);
 
-	if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
+	if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
 		/* Using random mac addresses for VFs */
 		eth_random_addr(&hw->mac_addr[0]);
+		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+	}
 
 	/* Copying mac address to DPDK eth_dev struct */
 	ether_addr_copy((struct ether_addr *)hw->mac_addr,