net/nfp: support hardware RSS v2
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 4eb032c..bedd4b6 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -378,8 +378,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        struct rte_eth_txmode *txmode;
-       uint32_t new_ctrl = 0;
-       uint32_t update = 0;
        struct nfp_net_hw *hw;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -405,96 +403,154 @@ nfp_net_configure(struct rte_eth_dev *dev)
        }
 
        /* Checking RX mode */
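+       /*
+        * Only check that the requested offloads are supported by the
+        * device here; the control word itself is built later by
+        * nfp_check_offloads() when the port is started.
+        */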
-       if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-                       update = NFP_NET_CFG_UPDATE_RSS;
-                       new_ctrl = NFP_NET_CFG_CTRL_RSS;
-               } else {
-                       PMD_INIT_LOG(INFO, "RSS not supported");
-                       return -EINVAL;
-               }
+       if ((rxmode->mq_mode & ETH_MQ_RX_RSS) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
+               PMD_INIT_LOG(INFO, "RSS not supported");
+               return -EINVAL;
        }
 
-       if (rxmode->split_hdr_size) {
+       /* Checking RX offloads */
+       if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
                PMD_INIT_LOG(INFO, "rxmode does not support split header");
                return -EINVAL;
        }
 
-       if (rxmode->hw_ip_checksum) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
-                       new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
-               } else {
-                       PMD_INIT_LOG(INFO, "RXCSUM not supported");
-                       return -EINVAL;
-               }
-       }
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
+               PMD_INIT_LOG(INFO, "RXCSUM not supported");
 
-       if (rxmode->hw_vlan_filter) {
+       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
                PMD_INIT_LOG(INFO, "VLAN filter not supported");
                return -EINVAL;
        }
 
-       if (rxmode->hw_vlan_strip) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
-                       new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
-               } else {
-                       PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-                       return -EINVAL;
-               }
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
+               PMD_INIT_LOG(INFO, "hw vlan strip not supported");
+               return -EINVAL;
        }
 
-       if (rxmode->hw_vlan_extend) {
+       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
                PMD_INIT_LOG(INFO, "VLAN extended not supported");
                return -EINVAL;
        }
 
-       if (rxmode->jumbo_frame)
-               hw->mtu = rxmode->max_rx_pkt_len;
+       if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+               PMD_INIT_LOG(INFO, "LRO not supported");
+               return -EINVAL;
+       }
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+               PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
+               return -EINVAL;
+       }
 
-       if (!rxmode->hw_strip_crc)
-               PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
+       if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
+               PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
+               return -EINVAL;
+       }
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
+               PMD_INIT_LOG(INFO, "MACSEC strip not supported");
+               return -EINVAL;
+       }
 
-       if (rxmode->enable_scatter) {
+       if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
+               PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
+
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
                PMD_INIT_LOG(INFO, "Scatter not supported");
                return -EINVAL;
        }
 
-       /* If next capabilities are supported, configure them by default */
+       if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+               PMD_INIT_LOG(INFO, "timestamp offfload not supported");
+               return -EINVAL;
+       }
 
-       /* VLAN insertion */
-       if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-               new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+       if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
+               PMD_INIT_LOG(INFO, "security offload not supported");
+               return -EINVAL;
+       }
 
-       /* L2 broadcast */
-       if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
-               new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
+       /* Checking TX offloads */
+       if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
+               PMD_INIT_LOG(INFO, "vlan insert offload not supported");
+               return -EINVAL;
+       }
 
-       /* L2 multicast */
-       if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
-               new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
+       if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
+               PMD_INIT_LOG(INFO, "TX checksum offload not supported");
+               return -EINVAL;
+       }
 
-       /* TX checksum offload */
-       if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-               new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+       if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
+               PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
+               return -EINVAL;
+       }
 
-       /* LSO offload */
-       if (hw->cap & NFP_NET_CFG_CTRL_LSO)
-               new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+       if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) {
+               PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
+               return -EINVAL;
+       }
 
-       /* RX gather */
-       if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-               new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
+       if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
+               PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
+               return -EINVAL;
+       }
 
-       if (!new_ctrl)
-               return 0;
+       if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
+               PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
+               return -EINVAL;
+       }
 
-       update |= NFP_NET_CFG_UPDATE_GEN;
+       if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
+               PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
+               return -EINVAL;
+       }
 
-       nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
-       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-               return -EIO;
+       if ((txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) ||
+           (txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO) ||
+           (txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO) ||
+           (txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
+               PMD_INIT_LOG(INFO, "tunneling offload not supported");
+               return -EINVAL;
+       }
 
-       hw->ctrl = new_ctrl;
+       if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+               PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
+               return -EINVAL;
+       }
+
+       if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
+               PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
+               return -EINVAL;
+       }
+
+       if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
+               PMD_INIT_LOG(INFO, "TX multisegs  not supported");
+               return -EINVAL;
+       }
+
+       if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+               PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
+               return -EINVAL;
+       }
+
+       if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
+               PMD_INIT_LOG(INFO, "TX security offload not supported");
+               return -EINVAL;
+       }
 
        return 0;
 }
@@ -672,15 +728,75 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
        return 0;
 }
 
+static uint32_t
+nfp_check_offloads(struct rte_eth_dev *dev)
+{
+       struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
+       struct rte_eth_rxmode *rxmode;
+       struct rte_eth_txmode *txmode;
+       uint32_t ctrl = 0;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       dev_conf = &dev->data->dev_conf;
+       rxmode = &dev_conf->rxmode;
+       txmode = &dev_conf->txmode;
+
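+       /*
+        * Map the offloads enabled in dev_conf into NFP_NET_CFG_CTRL_* bits.
+        * The capabilities were already validated in nfp_net_configure().
+        */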
+       if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+               if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+                       ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+       }
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+               if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+                       ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+       }
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+               hw->mtu = rxmode->max_rx_pkt_len;
+
+       if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+               ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+       /* L2 broadcast */
+       if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+               ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+       /* L2 multicast */
+       if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+               ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+       /* TX checksum offload */
+       if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ||
+           (txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+           (txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
+               ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+       /* LSO offload */
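+       /* Prefer the LSO capability; otherwise fall back to LSOv2 */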
+       if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+               if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+                       ctrl |= NFP_NET_CFG_CTRL_LSO;
+               else
+                       ctrl |= NFP_NET_CFG_CTRL_LSO2;
+       }
+
+       /* RX gather */
+       if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+               ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
+       return ctrl;
+}
+
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-       struct rte_eth_conf *dev_conf;
-       struct rte_eth_rxmode *rxmode;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
+       struct rte_eth_rxmode *rxmode;
        uint32_t intr_vector;
        int ret;
 
@@ -691,9 +807,6 @@ nfp_net_start(struct rte_eth_dev *dev)
        /* Disabling queues just in case... */
        nfp_net_disable_queues(dev);
 
-       /* Writing configuration parameters in the device */
-       nfp_net_params_setup(hw);
-
        /* Enabling the required queues in the device */
        nfp_net_enable_queues(dev);
 
@@ -728,21 +841,22 @@ nfp_net_start(struct rte_eth_dev *dev)
 
        rte_intr_enable(intr_handle);
 
+       new_ctrl = nfp_check_offloads(dev);
+
+       /* Writing configuration parameters in the device */
+       nfp_net_params_setup(hw);
+
        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
 
-       /* Checking RX mode */
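+       /* If RSS was requested, write the default RSS configuration and enable it */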
        if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-                       if (!nfp_net_rss_config_default(dev))
-                               update |= NFP_NET_CFG_UPDATE_RSS;
-               } else {
-                       PMD_INIT_LOG(INFO, "RSS not supported");
-                       return -EINVAL;
-               }
+               nfp_net_rss_config_default(dev);
+               update |= NFP_NET_CFG_UPDATE_RSS;
+               new_ctrl |= NFP_NET_CFG_CTRL_RSS;
        }
+
        /* Enable device */
-       new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
+       new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
 
        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
@@ -1143,7 +1257,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1159,6 +1272,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                             DEV_RX_OFFLOAD_UDP_CKSUM |
                                             DEV_RX_OFFLOAD_TCP_CKSUM;
 
+       dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
        if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
                dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
 
@@ -1167,6 +1282,12 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                             DEV_TX_OFFLOAD_UDP_CKSUM |
                                             DEV_TX_OFFLOAD_TCP_CKSUM;
 
+       if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
+               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+       if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = DEFAULT_RX_PTHRESH,
@@ -1185,8 +1306,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
-               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-                            ETH_TXQ_FLAGS_NOOFFLOADS,
        };
 
        dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
@@ -1200,9 +1319,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
                               ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
                               ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
-
-       if (hw->cap & NFP_NET_CFG_CTRL_LSO)
-               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
 }
 
 static const uint32_t *
@@ -1451,6 +1567,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
        const struct rte_memzone *tz;
        struct nfp_net_rxq *rxq;
        struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
+       struct rte_eth_rxmode *rxmode;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1464,6 +1582,17 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       dev_conf = &dev->data->dev_conf;
+       rxmode = &dev_conf->rxmode;
+
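+       /* Per-queue Rx offloads must match the port-level configuration */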
+       if (rx_conf->offloads != rxmode->offloads) {
+               RTE_LOG(ERR, PMD, "queue %u rx offloads not as port offloads\n",
+                                 queue_idx);
+               RTE_LOG(ERR, PMD, "\tport: %" PRIx64 "\n", rxmode->offloads);
+               RTE_LOG(ERR, PMD, "\tqueue: %" PRIx64 "\n", rx_conf->offloads);
+               return -EINVAL;
+       }
+
        /*
         * Free memory prior to re-allocation if needed. This is the case after
         * calling nfp_net_stop
@@ -1600,6 +1729,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        struct nfp_net_txq *txq;
        uint16_t tx_free_thresh;
        struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
+       struct rte_eth_txmode *txmode;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1613,6 +1744,15 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                return -EINVAL;
        }
 
+       dev_conf = &dev->data->dev_conf;
+       txmode = &dev_conf->txmode;
+
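+       /* Per-queue Tx offloads must match the port-level configuration */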
+       if (tx_conf->offloads != txmode->offloads) {
+               RTE_LOG(ERR, PMD, "queue %u tx offloads not as port offloads",
+                                 queue_idx);
+               return -EINVAL;
+       }
+
        tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
                                    tx_conf->tx_free_thresh :
                                    DEFAULT_TX_FREE_THRESH);
@@ -1672,7 +1812,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
 
        txq->port_id = dev->data->port_id;
-       txq->txq_flags = tx_conf->txq_flags;
 
        /* Saving physical and virtual addresses for the TX ring */
        txq->dma = (uint64_t)tz->iova;
@@ -1712,7 +1851,7 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
        uint64_t ol_flags;
        struct nfp_net_hw *hw = txq->hw;
 
-       if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
+       if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
                goto clean_txd;
 
        ol_flags = mb->ol_flags;
@@ -1720,15 +1859,19 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
        if (!(ol_flags & PKT_TX_TCP_SEG))
                goto clean_txd;
 
-       txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
-       txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
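+       /*
+        * Fill in the L3 and L4 header offsets, the total header length and
+        * the MSS required by the LSO descriptor format.
+        */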
+       txd->l3_offset = mb->l2_len;
+       txd->l4_offset = mb->l2_len + mb->l3_len;
+       txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+       txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
        txd->flags = PCIE_DESC_TX_LSO;
        return;
 
 clean_txd:
        txd->flags = 0;
+       txd->l3_offset = 0;
        txd->l4_offset = 0;
-       txd->lso = 0;
+       txd->lso_hdrlen = 0;
+       txd->mss = 0;
 }
 
 /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
@@ -1814,14 +1957,10 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
                return;
 
-       if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) {
-               if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
-                       return;
-
-               hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
-               hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
-
-       } else if (NFP_DESC_META_LEN(rxd)) {
+       /* This is true for new firmware: RSSv2 capability or NFD version 4 */
+       if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
+           (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
+            NFP_DESC_META_LEN(rxd))) {
                /*
                 * new metadata api:
                 * <----  32 bit  ----->
@@ -1854,7 +1993,11 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
                        return;
                }
        } else {
-               return;
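+               /*
+                * Older firmware: read the hash and its type from the fixed
+                * prepend offsets (NFP_HASH_OFFSET / NFP_HASH_TYPE_OFFSET).
+                */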
+               if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+                       return;
+
+               hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
+               hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
        }
 
        mbuf->hash.rss = hash;
@@ -2800,14 +2943,20 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
        hw->mtu = ETHER_MTU;
 
+       /* VLAN insertion is incompatible with LSOv2 */
+       if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
+               hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
+
        if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
                hw->rx_offset = NFP_NET_RX_OFFSET;
        else
                hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
 
-       PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
-                    hw->ver, hw->max_mtu);
-       PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
+       PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
+                          NFD_CFG_MAJOR_VERSION_of(hw->ver),
+                          NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
+
+       PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
                     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
                     hw->cap & NFP_NET_CFG_CTRL_L2BC    ? "L2BCFILT " : "",
                     hw->cap & NFP_NET_CFG_CTRL_L2MC    ? "L2MCFILT " : "",
@@ -2818,7 +2967,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                     hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
                     hw->cap & NFP_NET_CFG_CTRL_GATHER  ? "GATHER "  : "",
                     hw->cap & NFP_NET_CFG_CTRL_LSO     ? "TSO "     : "",
-                    hw->cap & NFP_NET_CFG_CTRL_RSS     ? "RSS "     : "");
+                    hw->cap & NFP_NET_CFG_CTRL_LSO2    ? "TSOv2 "   : "",
+                    hw->cap & NFP_NET_CFG_CTRL_RSS     ? "RSS "     : "",
+                    hw->cap & NFP_NET_CFG_CTRL_RSS2    ? "RSSv2 "   : "");
 
        hw->ctrl = 0;