net/nfp: support new HW offloads API
authorAlejandro Lucero <alejandro.lucero@netronome.com>
Thu, 15 Mar 2018 14:30:37 +0000 (14:30 +0000)
committerFerruh Yigit <ferruh.yigit@intel.com>
Fri, 13 Apr 2018 22:40:21 +0000 (00:40 +0200)
In the upcoming 18.05 release the old hw offload API will be removed.
This patch adds support for the new hw offload API only.

Signed-off-by: Alejandro Lucero <alejandro.lucero@netronome.com>
drivers/net/nfp/nfp_net.c
drivers/net/nfp/nfp_net_pmd.h

index 4eb032c..606cd3d 100644 (file)
@@ -378,8 +378,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        struct rte_eth_txmode *txmode;
-       uint32_t new_ctrl = 0;
-       uint32_t update = 0;
        struct nfp_net_hw *hw;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -405,96 +403,154 @@ nfp_net_configure(struct rte_eth_dev *dev)
        }
 
        /* Checking RX mode */
-       if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-                       update = NFP_NET_CFG_UPDATE_RSS;
-                       new_ctrl = NFP_NET_CFG_CTRL_RSS;
-               } else {
-                       PMD_INIT_LOG(INFO, "RSS not supported");
-                       return -EINVAL;
-               }
+       if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+           !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
+               PMD_INIT_LOG(INFO, "RSS not supported");
+               return -EINVAL;
        }
 
-       if (rxmode->split_hdr_size) {
+       /* Checking RX offloads */
+       if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) {
                PMD_INIT_LOG(INFO, "rxmode does not support split header");
                return -EINVAL;
        }
 
-       if (rxmode->hw_ip_checksum) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
-                       new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
-               } else {
-                       PMD_INIT_LOG(INFO, "RXCSUM not supported");
-                       return -EINVAL;
-               }
-       }
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_RXCSUM))
+               PMD_INIT_LOG(INFO, "RXCSUM not supported");
 
-       if (rxmode->hw_vlan_filter) {
+       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
                PMD_INIT_LOG(INFO, "VLAN filter not supported");
                return -EINVAL;
        }
 
-       if (rxmode->hw_vlan_strip) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
-                       new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
-               } else {
-                       PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-                       return -EINVAL;
-               }
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_RXVLAN)) {
+               PMD_INIT_LOG(INFO, "hw vlan strip not supported");
+               return -EINVAL;
        }
 
-       if (rxmode->hw_vlan_extend) {
+       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
                PMD_INIT_LOG(INFO, "VLAN extended not supported");
                return -EINVAL;
        }
 
-       if (rxmode->jumbo_frame)
-               hw->mtu = rxmode->max_rx_pkt_len;
+       if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+               PMD_INIT_LOG(INFO, "LRO not supported");
+               return -EINVAL;
+       }
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+               PMD_INIT_LOG(INFO, "QINQ STRIP not supported");
+               return -EINVAL;
+       }
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
+               PMD_INIT_LOG(INFO, "Outer IP checksum not supported");
+               return -EINVAL;
+       }
 
-       if (!rxmode->hw_strip_crc)
-               PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
+       if (rxmode->offloads & DEV_RX_OFFLOAD_MACSEC_STRIP) {
+               PMD_INIT_LOG(INFO, "MACSEC strip not supported");
+               return -EINVAL;
+       }
 
-       if (rxmode->enable_scatter) {
+       if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
+               PMD_INIT_LOG(INFO, "HW does strip CRC. Not configurable!");
+
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_SCATTER)) {
                PMD_INIT_LOG(INFO, "Scatter not supported");
                return -EINVAL;
        }
 
-       /* If next capabilities are supported, configure them by default */
+       if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+               PMD_INIT_LOG(INFO, "timestamp offload not supported");
+               return -EINVAL;
+       }
 
-       /* VLAN insertion */
-       if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-               new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+       if (rxmode->offloads & DEV_RX_OFFLOAD_SECURITY) {
+               PMD_INIT_LOG(INFO, "security offload not supported");
+               return -EINVAL;
+       }
 
-       /* L2 broadcast */
-       if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
-               new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
+       /* checking TX offloads */
+       if ((txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
+               PMD_INIT_LOG(INFO, "vlan insert offload not supported");
+               return -EINVAL;
+       }
 
-       /* L2 multicast */
-       if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
-               new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
+       if ((txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) {
+               PMD_INIT_LOG(INFO, "TX checksum offload not supported");
+               return -EINVAL;
+       }
 
-       /* TX checksum offload */
-       if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-               new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+       if (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) {
+               PMD_INIT_LOG(INFO, "TX SCTP checksum offload not supported");
+               return -EINVAL;
+       }
 
-       /* LSO offload */
-       if (hw->cap & NFP_NET_CFG_CTRL_LSO)
-               new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+       if ((txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_LSO)) {
+               PMD_INIT_LOG(INFO, "TSO TCP offload not supported");
+               return -EINVAL;
+       }
 
-       /* RX gather */
-       if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-               new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
+       if (txmode->offloads & DEV_TX_OFFLOAD_UDP_TSO) {
+               PMD_INIT_LOG(INFO, "TSO UDP offload not supported");
+               return -EINVAL;
+       }
 
-       if (!new_ctrl)
-               return 0;
+       if (txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
+               PMD_INIT_LOG(INFO, "TX outer checksum offload not supported");
+               return -EINVAL;
+       }
 
-       update |= NFP_NET_CFG_UPDATE_GEN;
+       if (txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) {
+               PMD_INIT_LOG(INFO, "QINQ insert offload not supported");
+               return -EINVAL;
+       }
 
-       nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
-       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-               return -EIO;
+       if (txmode->offloads & DEV_TX_OFFLOAD_VXLAN_TNL_TSO ||
+           txmode->offloads & DEV_TX_OFFLOAD_GRE_TNL_TSO ||
+           txmode->offloads & DEV_TX_OFFLOAD_IPIP_TNL_TSO ||
+           txmode->offloads & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
+               PMD_INIT_LOG(INFO, "tunneling offload not supported");
+               return -EINVAL;
+       }
 
-       hw->ctrl = new_ctrl;
+       if (txmode->offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) {
+               PMD_INIT_LOG(INFO, "TX MACSEC offload not supported");
+               return -EINVAL;
+       }
+
+       if (txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE) {
+               PMD_INIT_LOG(INFO, "multiqueue lockfree not supported");
+               return -EINVAL;
+       }
+
+       if ((txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+           !(hw->cap & NFP_NET_CFG_CTRL_GATHER)) {
+               PMD_INIT_LOG(INFO, "TX multisegs not supported");
+               return -EINVAL;
+       }
+
+       if (txmode->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
+               PMD_INIT_LOG(INFO, "mbuf fast-free not supported");
+               return -EINVAL;
+       }
+
+       if (txmode->offloads & DEV_TX_OFFLOAD_SECURITY) {
+               PMD_INIT_LOG(INFO, "TX security offload not supported");
+               return -EINVAL;
+       }
 
        return 0;
 }
@@ -672,15 +728,71 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
        return 0;
 }
 
+static uint32_t
+nfp_check_offloads(struct rte_eth_dev *dev)
+{
+       struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
+       struct rte_eth_rxmode *rxmode;
+       struct rte_eth_txmode *txmode;
+       uint32_t ctrl = 0;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       dev_conf = &dev->data->dev_conf;
+       rxmode = &dev_conf->rxmode;
+       txmode = &dev_conf->txmode;
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+               if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+                       ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+       }
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+               if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+                       ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+       }
+
+       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+               hw->mtu = rxmode->max_rx_pkt_len;
+
+       if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+               ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+       /* L2 broadcast */
+       if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+               ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+       /* L2 multicast */
+       if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+               ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+       /* TX checksum offload */
+       if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+           txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
+           txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+               ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+       /* LSO offload */
+       if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+               ctrl |= NFP_NET_CFG_CTRL_LSO;
+
+       /* RX gather */
+       if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+               ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
+       return ctrl;
+}
+
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-       struct rte_eth_conf *dev_conf;
-       struct rte_eth_rxmode *rxmode;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
+       struct rte_eth_rxmode *rxmode;
        uint32_t intr_vector;
        int ret;
 
@@ -691,9 +803,6 @@ nfp_net_start(struct rte_eth_dev *dev)
        /* Disabling queues just in case... */
        nfp_net_disable_queues(dev);
 
-       /* Writing configuration parameters in the device */
-       nfp_net_params_setup(hw);
-
        /* Enabling the required queues in the device */
        nfp_net_enable_queues(dev);
 
@@ -728,21 +837,22 @@ nfp_net_start(struct rte_eth_dev *dev)
 
        rte_intr_enable(intr_handle);
 
+       new_ctrl = nfp_check_offloads(dev);
+
+       /* Writing configuration parameters in the device */
+       nfp_net_params_setup(hw);
+
        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
 
-       /* Checking RX mode */
        if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
-                       if (!nfp_net_rss_config_default(dev))
-                               update |= NFP_NET_CFG_UPDATE_RSS;
-               } else {
-                       PMD_INIT_LOG(INFO, "RSS not supported");
-                       return -EINVAL;
-               }
+               nfp_net_rss_config_default(dev);
+               update |= NFP_NET_CFG_UPDATE_RSS;
+               new_ctrl |= NFP_NET_CFG_CTRL_RSS;
        }
+
        /* Enable device */
-       new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
+       new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
 
        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
@@ -1159,6 +1269,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                             DEV_RX_OFFLOAD_UDP_CKSUM |
                                             DEV_RX_OFFLOAD_TCP_CKSUM;
 
+       dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
        if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
                dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
 
@@ -1167,6 +1279,12 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                             DEV_TX_OFFLOAD_UDP_CKSUM |
                                             DEV_TX_OFFLOAD_TCP_CKSUM;
 
+       if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+       if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = DEFAULT_RX_PTHRESH,
@@ -1185,8 +1303,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
-               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-                            ETH_TXQ_FLAGS_NOOFFLOADS,
        };
 
        dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
@@ -1200,9 +1316,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
                               ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
                               ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
-
-       if (hw->cap & NFP_NET_CFG_CTRL_LSO)
-               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
 }
 
 static const uint32_t *
@@ -1451,6 +1564,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
        const struct rte_memzone *tz;
        struct nfp_net_rxq *rxq;
        struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
+       struct rte_eth_rxmode *rxmode;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1464,6 +1579,17 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       dev_conf = &dev->data->dev_conf;
+       rxmode = &dev_conf->rxmode;
+
+       if (rx_conf->offloads != rxmode->offloads) {
+               RTE_LOG(ERR, PMD, "queue %u rx offloads not as port offloads\n",
+                                 queue_idx);
+               RTE_LOG(ERR, PMD, "\tport: %" PRIx64 "\n", rxmode->offloads);
+               RTE_LOG(ERR, PMD, "\tqueue: %" PRIx64 "\n", rx_conf->offloads);
+               return -EINVAL;
+       }
+
        /*
         * Free memory prior to re-allocation if needed. This is the case after
         * calling nfp_net_stop
@@ -1600,6 +1726,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        struct nfp_net_txq *txq;
        uint16_t tx_free_thresh;
        struct nfp_net_hw *hw;
+       struct rte_eth_conf *dev_conf;
+       struct rte_eth_txmode *txmode;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1613,6 +1741,15 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                return -EINVAL;
        }
 
+       dev_conf = &dev->data->dev_conf;
+       txmode = &dev_conf->txmode;
+
+       if (tx_conf->offloads != txmode->offloads) {
+               RTE_LOG(ERR, PMD, "queue %u tx offloads not as port offloads\n",
+                                 queue_idx);
+               return -EINVAL;
+       }
+
        tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
                                    tx_conf->tx_free_thresh :
                                    DEFAULT_TX_FREE_THRESH);
@@ -1672,7 +1809,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
 
        txq->port_id = dev->data->port_id;
-       txq->txq_flags = tx_conf->txq_flags;
 
        /* Saving physical and virtual addresses for the TX ring */
        txq->dma = (uint64_t)tz->iova;
index 097c871..3c80702 100644 (file)
@@ -248,15 +248,13 @@ struct nfp_net_txq {
        /*
         * At this point 48 bytes have been used for all the fields in the
         * TX critical path. We have room for 8 bytes and still all placed
-        * in a cache line. We are not using the threshold values below nor
-        * the txq_flags but if we need to, we can add the most used in the
-        * remaining bytes.
+        * in a cache line. We are not using the threshold values below but
+        * if we need to, we can add the most used in the remaining bytes.
         */
        uint32_t tx_rs_thresh; /* not used by now. Future? */
        uint32_t tx_pthresh;   /* not used by now. Future? */
        uint32_t tx_hthresh;   /* not used by now. Future? */
        uint32_t tx_wthresh;   /* not used by now. Future? */
-       uint32_t txq_flags;    /* not used by now. Future? */
        uint16_t port_id;
        int qidx;
        int tx_qcidx;