net/dpaa2: add RSS flow distribution
authorHemant Agrawal <hemant.agrawal@nxp.com>
Tue, 11 Apr 2017 13:49:23 +0000 (19:19 +0530)
committerFerruh Yigit <ferruh.yigit@intel.com>
Wed, 19 Apr 2017 13:37:37 +0000 (15:37 +0200)
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
doc/guides/nics/features/dpaa2.ini
drivers/net/dpaa2/Makefile
drivers/net/dpaa2/base/dpaa2_hw_dpni.c [new file with mode: 0644]
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/dpaa2/dpaa2_ethdev.h
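
This patch programs DPNI hash-based flow distribution: when the application configures ETH_MQ_RX_RSS, the requested rss_hf bits are translated into a DPKG key-extract profile and applied on the default traffic class via dpni_set_rx_tc_dist(). A minimal application-side sketch of the configuration that triggers the new path in dpaa2_eth_dev_configure() (generic rte_ethdev usage, not part of this patch; port_id, the queue counts and ret are assumed to be declared elsewhere):

	/* Sketch only: request RSS across the Rx queues. The rss_hf bits
	 * below are the ones this patch translates into DPKG extracts
	 * (IP src/dst/proto and L4 ports). More than one Rx queue must be
	 * configured, otherwise the PMD skips distribution setup.
	 */
	struct rte_eth_conf port_conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
			},
		},
	};

	ret = rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues, &port_conf);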

index 0b59725..20152a0 100644 (file)
@@ -5,6 +5,7 @@
 ;
 [Features]
 Queue start/stop     = Y
+RSS hash             = Y
 Linux VFIO           = Y
 ARMv8                = Y
 Usage doc            = Y
index 4350f45..7f88e9b 100644 (file)
@@ -58,6 +58,7 @@ EXPORT_MAP := rte_pmd_dpaa2_version.map
 # library version
 LIBABIVER := 1
 
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c
 
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
new file mode 100644 (file)
index 0000000..c95c083
--- /dev/null
@@ -0,0 +1,287 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright (c) 2016 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <dpaa2_hw_pvt.h>
+
+#include "../dpaa2_ethdev.h"
+
+static void
+dpaa2_distset_to_dpkg_profile_cfg(
+               uint32_t req_dist_set,
+               struct dpkg_profile_cfg *kg_cfg);
+
+int
+dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+                     uint32_t req_dist_set)
+{
+       struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+       struct fsl_mc_io *dpni = priv->hw;
+       struct dpni_rx_tc_dist_cfg tc_cfg;
+       struct dpkg_profile_cfg kg_cfg;
+       void *p_params;
+       int ret, tc_index = 0;
+
+       p_params = rte_malloc(
+               NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+       if (!p_params) {
+               RTE_LOG(ERR, PMD, "Memory unavailable\n");
+               return -ENOMEM;
+       }
+       memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+       memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+
+       dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
+       tc_cfg.key_cfg_iova = (uint64_t)(p_params);
+       tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
+       tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+       ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
+       if (ret) {
+               RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
+               rte_free(p_params);
+               return ret;
+       }
+
+       ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+                                 &tc_cfg);
+       rte_free(p_params);
+       if (ret) {
+               RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
+                       " err code: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+int dpaa2_remove_flow_dist(
+       struct rte_eth_dev *eth_dev,
+       uint8_t tc_index)
+{
+       struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+       struct fsl_mc_io *dpni = priv->hw;
+       struct dpni_rx_tc_dist_cfg tc_cfg;
+       struct dpkg_profile_cfg kg_cfg;
+       void *p_params;
+       int ret;
+
+       p_params = rte_malloc(
+               NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+       if (!p_params) {
+               RTE_LOG(ERR, PMD, "Memory unavailable\n");
+               return -ENOMEM;
+       }
+       memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+       memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+
+       tc_cfg.key_cfg_iova = (uint64_t)(p_params);
+       tc_cfg.dist_size = 0;
+       tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
+
+       ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
+       if (ret) {
+               RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
+               rte_free(p_params);
+               return ret;
+       }
+
+       ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+                                 &tc_cfg);
+       rte_free(p_params);
+       if (ret) {
+               RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
+                       " err code: %d\n", ret);
+               return ret;
+       }
+       return ret;
+}
+
+static void
+dpaa2_distset_to_dpkg_profile_cfg(
+               uint32_t req_dist_set,
+               struct dpkg_profile_cfg *kg_cfg)
+{
+       uint32_t loop = 0, i = 0, dist_field = 0;
+       int l2_configured = 0, l3_configured = 0;
+       int l4_configured = 0, sctp_configured = 0;
+
+       memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
+       while (req_dist_set) {
+               if (req_dist_set % 2 != 0) {
+                       dist_field = 1U << loop;
+                       switch (dist_field) {
+                       case ETH_RSS_L2_PAYLOAD:
+
+                               if (l2_configured)
+                                       break;
+                               l2_configured = 1;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_ETH;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_ETH_TYPE;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+                       break;
+
+                       case ETH_RSS_IPV4:
+                       case ETH_RSS_FRAG_IPV4:
+                       case ETH_RSS_NONFRAG_IPV4_OTHER:
+                       case ETH_RSS_IPV6:
+                       case ETH_RSS_FRAG_IPV6:
+                       case ETH_RSS_NONFRAG_IPV6_OTHER:
+                       case ETH_RSS_IPV6_EX:
+
+                               if (l3_configured)
+                                       break;
+                               l3_configured = 1;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_IP;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_IP_SRC;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_IP;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_IP_DST;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_IP;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_IP_PROTO;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               kg_cfg->num_extracts++;
+                               i++;
+                       break;
+
+                       case ETH_RSS_NONFRAG_IPV4_TCP:
+                       case ETH_RSS_NONFRAG_IPV6_TCP:
+                       case ETH_RSS_NONFRAG_IPV4_UDP:
+                       case ETH_RSS_NONFRAG_IPV6_UDP:
+                       case ETH_RSS_IPV6_TCP_EX:
+                       case ETH_RSS_IPV6_UDP_EX:
+
+                               if (l4_configured)
+                                       break;
+                               l4_configured = 1;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_TCP;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_TCP_PORT_SRC;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_TCP;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_TCP_PORT_DST;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+                               break;
+
+                       case ETH_RSS_NONFRAG_IPV4_SCTP:
+                       case ETH_RSS_NONFRAG_IPV6_SCTP:
+
+                               if (sctp_configured)
+                                       break;
+                               sctp_configured = 1;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_SCTP;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_SCTP_PORT_SRC;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_SCTP;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_SCTP_PORT_DST;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+                               break;
+
+                       default:
+                               PMD_DRV_LOG(WARNING, "Bad flow distribution"
+                                           " option %x\n", dist_field);
+                       }
+               }
+               req_dist_set = req_dist_set >> 1;
+               loop++;
+       }
+       kg_cfg->num_extracts = i;
+}
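
For illustration, a worked example of the profile built by dpaa2_distset_to_dpkg_profile_cfg() above (a sketch, not code from the patch): with rss_hf = ETH_RSS_IP | ETH_RSS_UDP the loop emits five extracts, and the L4 case reuses the NET_PROT_TCP field definitions for UDP as well:

	/* Illustrative only: resulting kg_cfg contents for
	 * rss_hf = ETH_RSS_IP | ETH_RSS_UDP. Every extract uses
	 * type = DPKG_EXTRACT_FROM_HDR and from_hdr.type = DPKG_FULL_FIELD.
	 *
	 *   [0] prot = NET_PROT_IP,  field = NH_FLD_IP_SRC
	 *   [1] prot = NET_PROT_IP,  field = NH_FLD_IP_DST
	 *   [2] prot = NET_PROT_IP,  field = NH_FLD_IP_PROTO
	 *   [3] prot = NET_PROT_TCP, field = NH_FLD_TCP_PORT_SRC
	 *   [4] prot = NET_PROT_TCP, field = NH_FLD_TCP_PORT_DST
	 */
	kg_cfg.num_extracts = 5;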
index 5618feb..61ce062 100644 (file)
@@ -115,7 +115,8 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
        }
 
        vq_id = 0;
-       for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
+       for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
+            dist_idx++) {
                mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
                mcq->tc_index = DPAA2_DEF_TC;
                mcq->flow_id = dist_idx;
@@ -141,6 +142,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_dev_data *data = dev->data;
        struct rte_eth_conf *eth_conf = &data->dev_conf;
+       int ret;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -152,6 +154,18 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                return -1;
        }
 
+       if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+               /* Return in case number of Rx queues is 1 */
+               if (data->nb_rx_queues == 1)
+                       return 0;
+               ret = dpaa2_setup_flow_dist(dev,
+                               eth_conf->rx_adv_conf.rss_conf.rss_hf);
+               if (ret) {
+                       PMD_INIT_LOG(ERR, "unable to set flow distribution. "
+                                    "Please check queue config\n");
+                       return ret;
+               }
+       }
        return 0;
 }
 
@@ -183,7 +197,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
        dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
 
        /*Get the tc id and flow id from given VQ id*/
-       flow_id = rx_queue_id;
+       flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
        memset(&cfg, 0, sizeof(struct dpni_queue));
 
        options = options | DPNI_QUEUE_OPT_USER_CTX;
@@ -373,7 +387,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        struct fsl_mc_io *dpni_dev;
        struct dpni_attr attr;
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
-       int ret, hw_id;
+       int i, ret, hw_id;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -415,7 +429,16 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        priv->num_tc = attr.num_tcs;
-       priv->nb_rx_queues = attr.num_queues;
+       for (i = 0; i < attr.num_tcs; i++) {
+               priv->num_dist_per_tc[i] = attr.num_queues;
+               break;
+       }
+
+       /* Distribution is per TC only,
+        * so choose the Rx queues from the default TC only.
+        */
+       priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];
+
        priv->nb_tx_queues = attr.num_queues;
 
        priv->hw = dpni_dev;
index 5f599a7..d24fcc6 100644 (file)
 #include <mc/fsl_dpni.h>
 #include <mc/fsl_mc_sys.h>
 
+#define MAX_TCS                        DPNI_MAX_TC
 #define MAX_RX_QUEUES          16
 #define MAX_TX_QUEUES          16
 
 /* Default TC to be used for congestion, distribution etc. configuration. */
 #define DPAA2_DEF_TC           0
 
+/* Size of the input SMMU mapped memory required by MC */
+#define DIST_PARAM_IOVA_SIZE 256
+
 struct dpaa2_dev_priv {
        void *hw;
        int32_t hw_id;
@@ -53,7 +57,15 @@ struct dpaa2_dev_priv {
        void *rx_vq[MAX_RX_QUEUES];
        void *tx_vq[MAX_TX_QUEUES];
 
+       uint16_t num_dist_per_tc[MAX_TCS];
        uint8_t num_tc;
        uint8_t flags; /*dpaa2 config flags */
 };
+
+int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+                         uint32_t req_dist_set);
+
+int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
+                          uint8_t tc_index);
+
 #endif /* _DPAA2_ETHDEV_H */