X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_net.c;h=54c6da9243f74156511c6e909530c8710725b660;hb=1ccdc31793aff57d5fe34041c7fa0aa9c00c7f5c;hp=048324ec941d0ac45ce0e2e6e5e0dab7a8f4449a;hpb=99d9d9d4ddd6cb2419bcdaf58a2f334046a7f511;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 048324ec94..54c6da9243 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -411,149 +411,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	/* Checking RX offloads */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
-		PMD_INIT_LOG(INFO, "rxmode does not support split header");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
-		PMD_INIT_LOG(INFO, "RXCSUM not supported");
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
-		PMD_INIT_LOG(INFO, "VLAN filter not supported");
-		return -EINVAL;
-	}
-
-	if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
-	    !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
-		PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
-		PMD_INIT_LOG(INFO, "LRO not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
-		PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
-		PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
-		PMD_INIT_LOG(INFO, "MACSEC strip not supported");
-		return -EINVAL;
-	}
-
-	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
-		PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
No configurable!"); - - if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) && - !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) { - PMD_INIT_LOG(INFO, "Scatter not supported"); - return -EINVAL; - } - - if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) { - PMD_INIT_LOG(INFO, "timestamp offfload not supported"); - return -EINVAL; - } - - if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) { - PMD_INIT_LOG(INFO, "security offload not supported"); - return -EINVAL; - } - - /* checking TX offloads */ - if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) && - !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) { - PMD_INIT_LOG(INFO, "vlan insert offload not supported"); - return -EINVAL; - } - - if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) && - !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) { - PMD_INIT_LOG(INFO, "TX checksum offload not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) { - PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported"); - return -EINVAL; - } - - if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) && - !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) { - PMD_INIT_LOG(INFO, "TSO TCP offload not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) { - PMD_INIT_LOG(INFO, "TSO UDP offload not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { - PMD_INIT_LOG(INFO, "TX outer checksum offload not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) { - PMD_INIT_LOG(INFO, "QINQ insert offload not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO || - txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO || - txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO || - txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { - PMD_INIT_LOG(INFO, "tunneling offload not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) { - PMD_INIT_LOG(INFO, "TX MACSEC offload not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) { - PMD_INIT_LOG(INFO, "multiqueue lockfree not supported"); - return -EINVAL; - } - - if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) && - !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) { - PMD_INIT_LOG(INFO, "TX multisegs not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) { - PMD_INIT_LOG(INFO, "mbuf fast-free not supported"); - return -EINVAL; - } - - if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) { - PMD_INIT_LOG(INFO, "TX security offload not supported"); - return -EINVAL; - } - return 0; } @@ -666,7 +523,7 @@ nfp_net_vf_read_mac(struct nfp_net_hw *hw) uint32_t tmp; tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR)); - memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr)); + memcpy(&hw->mac_addr[0], &tmp, 4); tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4)); memcpy(&hw->mac_addr[4], &tmp, 2); @@ -1341,8 +1198,10 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH, }; - dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP | + dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP; @@ -1575,9 +1434,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* switch to jumbo mode if needed */ if ((uint32_t)mtu > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + 
 	else
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
@@ -1600,8 +1459,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	const struct rte_memzone *tz;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_rxmode *rxmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1615,17 +1472,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	rxmode = &dev_conf->rxmode;
-
-	if (rx_conf->offloads != rxmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u rx offloads not as port offloads",
-			    queue_idx);
-		PMD_DRV_LOG(ERR, "\tport: %" PRIx64 "", rxmode->offloads);
-		PMD_DRV_LOG(ERR, "\tqueue: %" PRIx64 "", rx_conf->offloads);
-		return -EINVAL;
-	}
-
 	/*
 	 * Free memory prior to re-allocation if needed. This is the case after
 	 * calling nfp_net_stop
@@ -1660,8 +1506,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_count = nb_desc;
 	rxq->port_id = dev->data->port_id;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
-				  : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 
 	/*
@@ -1762,8 +1606,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct nfp_net_txq *txq;
 	uint16_t tx_free_thresh;
 	struct nfp_net_hw *hw;
-	struct rte_eth_conf *dev_conf;
-	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1777,15 +1619,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		return -EINVAL;
 	}
 
-	dev_conf = &dev->data->dev_conf;
-	txmode = &dev_conf->txmode;
-
-	if (tx_conf->offloads != txmode->offloads) {
-		PMD_DRV_LOG(ERR, "queue %u tx offloads not as port offloads",
-			    queue_idx);
-		return -EINVAL;
-	}
-
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 				    tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
 
@@ -1948,21 +1781,20 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
 		return;
 
 	/* If IPv4 and IP checksum error, fail */
-	if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
-	    !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
+	if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
+	    !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
 		mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+	else
+		mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
 
 	/* If neither UDP nor TCP return */
 	if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
 	    !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
 		return;
 
-	if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
-	    !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
-		mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-
-	if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
-	    !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
+	if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
+		mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+	else
 		mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
 }
 
@@ -2046,6 +1878,18 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
 	case NFP_NET_RSS_IPV6_EX:
 		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
 		break;
+	case NFP_NET_RSS_IPV4_TCP:
+		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+		break;
+	case NFP_NET_RSS_IPV6_TCP:
+		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+		break;
+	case NFP_NET_RSS_IPV4_UDP:
+		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+		break;
+	case NFP_NET_RSS_IPV6_UDP:
+		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+		break;
 	default:
 		mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
 	}
@@ -2418,11 +2262,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txq->wr_p = 0;
 
 			pkt_size -= dma_size;
-			if (!pkt_size)
-				/* End of packet */
-				txds->offset_eop |= PCIE_DESC_TX_EOP;
+
+			/*
+			 * Making the EOP, packets with just one segment
+			 * the priority
+			 */
+			if (likely(!pkt_size))
+				txds->offset_eop = PCIE_DESC_TX_EOP;
 			else
-				txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+				txds->offset_eop = 0;
 
 			pkt = pkt->next;
 			/* Referencing next free TX descriptor */
@@ -2623,14 +2471,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 	rss_hf = rss_conf->rss_hf;
 
 	if (rss_hf & ETH_RSS_IPV4)
-		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
-				NFP_NET_CFG_RSS_IPV4_TCP |
-				NFP_NET_CFG_RSS_IPV4_UDP;
+		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
+
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
+
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
 	if (rss_hf & ETH_RSS_IPV6)
-		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
-				NFP_NET_CFG_RSS_IPV6_TCP |
-				NFP_NET_CFG_RSS_IPV6_UDP;
+		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
+
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
+
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
@@ -2846,6 +2702,14 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 
 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
+	/* NFP can not handle DMA addresses requiring more than 40 bits */
+	if (rte_mem_check_dma_mask(40)) {
+		RTE_LOG(ERR, PMD, "device %s can not be used:",
+				   pci_dev->device.name);
+		RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
+		return -ENODEV;
+	};
+
 	if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
 	    (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
 		port = get_pf_port_number(eth_dev->data->name);
@@ -3044,6 +2908,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	ether_addr_copy((struct ether_addr *)hw->mac_addr,
 			&eth_dev->data->mac_addrs[0]);
 
+	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
+
 	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
 		     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
@@ -3130,6 +2997,8 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
 
 	if (ret)
 		rte_eth_dev_release_port(eth_dev);
+	else
+		rte_eth_dev_probing_finish(eth_dev);
 
 	rte_free(port_name);
 
@@ -3144,8 +3013,8 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
 	struct nfp_cpp *cpp = nsp->cpp;
 	int fw_f;
 	char *fw_buf;
-	char fw_name[100];
-	char serial[100];
+	char fw_name[125];
+	char serial[40];
 	struct stat file_stat;
 	off_t fsize, bytes;
@@ -3281,7 +3150,18 @@ static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	if (!dev)
 		return ret;
 
-	cpp = nfp_cpp_from_device_name(dev->device.name);
+	/*
+	 * When device bound to UIO, the device could be used, by mistake,
+	 * by two DPDK apps, and the UIO driver does not avoid it. This
+	 * could lead to a serious problem when configuring the NFP CPP
+	 * interface. Here we avoid this telling to the CPP init code to
+	 * use a lock file if UIO is being used.
+	 */
+	if (dev->kdrv == RTE_KDRV_VFIO)
+		cpp = nfp_cpp_from_device_name(dev, 0);
+	else
+		cpp = nfp_cpp_from_device_name(dev, 1);
+
 	if (!cpp) {
 		PMD_DRV_LOG(ERR, "A CPP handle can not be obtained");
 		ret = -EIO;
@@ -3410,14 +3290,16 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
 	.id_table = pci_id_nfp_pf_net_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = nfp_pf_pci_probe,
 	.remove = eth_nfp_pci_remove,
 };
 
 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
 	.id_table = pci_id_nfp_vf_net_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = eth_nfp_pci_probe,
 	.remove = eth_nfp_pci_remove,
 };
@@ -3429,9 +3311,7 @@ RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
 
-RTE_INIT(nfp_init_log);
-static void
-nfp_init_log(void)
+RTE_INIT(nfp_init_log)
 {
 	nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
 	if (nfp_logtype_init >= 0)
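
For reference, the nfp_net_rx_cksum hunk above changes the driver to report positive checksum results (PKT_RX_IP_CKSUM_GOOD / PKT_RX_L4_CKSUM_GOOD) instead of only flagging failures, and folds the separate TCP and UDP checks into one combined L4 verdict. The following is a minimal standalone sketch of that flag logic; the DESC_* and RX_* constants are made-up stand-ins for the driver's PCIE_DESC_RX_* and PKT_RX_* definitions (from nfp_net_pmd.h and rte_mbuf.h), chosen only so the example compiles on its own.

/* rx_cksum_sketch.c - illustrative model of the reworked RX checksum logic */
#include <stdint.h>
#include <stdio.h>

/* Stand-in descriptor flags (hypothetical bit positions) */
#define DESC_IP4_CSUM     (1u << 0)  /* IPv4 checksum was checked */
#define DESC_IP4_CSUM_OK  (1u << 1)  /* ...and it was correct */
#define DESC_TCP_CSUM     (1u << 2)  /* packet is TCP */
#define DESC_UDP_CSUM     (1u << 3)  /* packet is UDP */
#define DESC_L4_CSUM_OK   (1u << 4)  /* combined L4 checksum verdict */

/* Stand-in mbuf offload flags */
#define RX_IP_CKSUM_GOOD  (1ull << 0)
#define RX_IP_CKSUM_BAD   (1ull << 1)
#define RX_L4_CKSUM_GOOD  (1ull << 2)
#define RX_L4_CKSUM_BAD   (1ull << 3)

static uint64_t
rx_cksum_flags(uint16_t desc_flags)
{
	uint64_t ol_flags = 0;

	/* IP: BAD only when the checksum was checked and failed */
	if ((desc_flags & DESC_IP4_CSUM) &&
	    !(desc_flags & DESC_IP4_CSUM_OK))
		ol_flags |= RX_IP_CKSUM_BAD;
	else
		ol_flags |= RX_IP_CKSUM_GOOD;

	/* No L4 verdict for packets that are neither TCP nor UDP */
	if (!(desc_flags & (DESC_TCP_CSUM | DESC_UDP_CSUM)))
		return ol_flags;

	/* One combined "L4 OK" bit replaces the per-protocol checks */
	if (desc_flags & DESC_L4_CSUM_OK)
		ol_flags |= RX_L4_CKSUM_GOOD;
	else
		ol_flags |= RX_L4_CKSUM_BAD;

	return ol_flags;
}

int
main(void)
{
	/* A TCP packet whose IPv4 and L4 checksums were both verified */
	uint16_t flags = DESC_IP4_CSUM | DESC_IP4_CSUM_OK |
			 DESC_TCP_CSUM | DESC_L4_CSUM_OK;

	printf("ol_flags = 0x%llx\n",
	       (unsigned long long)rx_cksum_flags(flags));
	return 0;
}

Run on the flags above, the sketch sets both "good" bits, mirroring what the patched driver now writes into mb->ol_flags; applications get an explicit good/bad verdict instead of having to treat the absence of a "bad" flag as success.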