diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index a6f86df..b901b43 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP
+ *   Copyright 2016-2021 NXP
  *
  */
 
@@ -9,7 +9,7 @@
 #include <net/if.h>
 
 #include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
 
 #include "../dpaa2_ethdev.h"
 
-static int
+int
 dpaa2_distset_to_dpkg_profile_cfg(
                uint64_t req_dist_set,
                struct dpkg_profile_cfg *kg_cfg);
 
 int
-dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
-                     uint64_t req_dist_set)
+rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
+                             uint16_t offset,
+                             uint8_t size)
 {
+       struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        struct dpni_rx_tc_dist_cfg tc_cfg;
@@ -39,14 +41,78 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
        void *p_params;
        int ret, tc_index = 0;
 
+       p_params = rte_zmalloc(
+               NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+       if (!p_params) {
+               DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
+               return -ENOMEM;
+       }
+
+       kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
+       kg_cfg.extracts[0].extract.from_data.offset = offset;
+       kg_cfg.extracts[0].extract.from_data.size = size;
+       kg_cfg.extracts[0].num_of_byte_masks = 0;
+       kg_cfg.num_extracts = 1;
+
+       ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
+       if (ret) {
+               DPAA2_PMD_ERR("Unable to prepare extract parameters");
+               rte_free(p_params);
+               return ret;
+       }
+
+       memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+       tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
+       tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
+       tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+       ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+                                 &tc_cfg);
+       rte_free(p_params);
+       if (ret) {
+               DPAA2_PMD_ERR(
+                            "Setting distribution for Rx failed with err: %d",
+                            ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+int
+dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+       uint64_t req_dist_set, int tc_index)
+{
+       struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+       struct fsl_mc_io *dpni = priv->hw;
+       struct dpni_rx_dist_cfg tc_cfg;
+       struct dpkg_profile_cfg kg_cfg;
+       void *p_params;
+       int ret, tc_dist_queues;
+
+       /* Each TC is assigned priv->dist_queues Rx queues, in order of TC
+        * priority index; the last populated TC gets whatever remains of
+        * nb_rx_queues. Calculate the distribution size for this tc_index:
+        */
+       tc_dist_queues = eth_dev->data->nb_rx_queues -
+               tc_index * priv->dist_queues;
+       if (tc_dist_queues <= 0) {
+               DPAA2_PMD_INFO("No distribution on TC%d", tc_index);
+               return 0;
+       }
+
+       if (tc_dist_queues > priv->dist_queues)
+               tc_dist_queues = priv->dist_queues;
+
        p_params = rte_malloc(
                NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
        if (!p_params) {
                DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
                return -ENOMEM;
        }
+
        memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
-       memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+       memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
 
        ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
        if (ret) {
@@ -55,9 +121,11 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
                rte_free(p_params);
                return ret;
        }
+
        tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
-       tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
-       tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+       tc_cfg.dist_size = tc_dist_queues;
+       tc_cfg.enable = true;
+       tc_cfg.tc = tc_index;
 
        ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
        if (ret) {
@@ -66,8 +134,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
                return ret;
        }
 
-       ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
-                                 &tc_cfg);
+       ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
        rte_free(p_params);
        if (ret) {
                DPAA2_PMD_ERR(
@@ -85,7 +152,7 @@ int dpaa2_remove_flow_dist(
 {
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
-       struct dpni_rx_tc_dist_cfg tc_cfg;
+       struct dpni_rx_dist_cfg tc_cfg;
        struct dpkg_profile_cfg kg_cfg;
        void *p_params;
        int ret;
@@ -96,13 +163,15 @@ int dpaa2_remove_flow_dist(
                DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
                return -ENOMEM;
        }
-       memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
-       memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
-       kg_cfg.num_extracts = 0;
-       tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+
+       memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
        tc_cfg.dist_size = 0;
-       tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
+       tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+       tc_cfg.enable = true;
+       tc_cfg.tc = tc_index;
 
+       memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+       kg_cfg.num_extracts = 0;
        ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
        if (ret) {
                DPAA2_PMD_ERR("Unable to prepare extract parameters");
@@ -110,8 +179,8 @@ int dpaa2_remove_flow_dist(
                return ret;
        }
 
-       ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
-                                 &tc_cfg);
+       ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token,
+                       &tc_cfg);
        rte_free(p_params);
        if (ret)
                DPAA2_PMD_ERR(
@@ -120,19 +189,21 @@ int dpaa2_remove_flow_dist(
        return ret;
 }
 
-static int
+int
 dpaa2_distset_to_dpkg_profile_cfg(
                uint64_t req_dist_set,
                struct dpkg_profile_cfg *kg_cfg)
 {
-       uint32_t loop = 0, i = 0, dist_field = 0;
+       uint32_t loop = 0, i = 0;
+       uint64_t dist_field = 0;
        int l2_configured = 0, l3_configured = 0;
        int l4_configured = 0, sctp_configured = 0;
+       int mpls_configured = 0;
 
        memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
        while (req_dist_set) {
                if (req_dist_set % 2 != 0) {
-                       dist_field = 1U << loop;
+                       dist_field = 1ULL << loop;
                        switch (dist_field) {
                        case ETH_RSS_L2_PAYLOAD:
 
@@ -151,6 +222,43 @@ dpaa2_distset_to_dpkg_profile_cfg(
                                i++;
                        break;
 
+                       case ETH_RSS_MPLS:
+
+                               if (mpls_configured)
+                                       break;
+                               mpls_configured = 1;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_MPLS;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_MPLS_MPLSL_1;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_MPLS;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_MPLS_MPLSL_2;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+
+                               kg_cfg->extracts[i].extract.from_hdr.prot =
+                                       NET_PROT_MPLS;
+                               kg_cfg->extracts[i].extract.from_hdr.field =
+                                       NH_FLD_MPLS_MPLSL_N;
+                               kg_cfg->extracts[i].type =
+                                       DPKG_EXTRACT_FROM_HDR;
+                               kg_cfg->extracts[i].extract.from_hdr.type =
+                                       DPKG_FULL_FIELD;
+                               i++;
+                               break;
+
                        case ETH_RSS_IPV4:
                        case ETH_RSS_FRAG_IPV4:
                        case ETH_RSS_NONFRAG_IPV4_OTHER:
@@ -257,7 +365,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
 
                        default:
                                DPAA2_PMD_WARN(
-                                            "Unsupported flow dist option %x",
+                                     "unsupported flow dist option 0x%" PRIx64,
                                             dist_field);
                                return -EINVAL;
                        }
@@ -315,6 +423,7 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
        }
 
        /*Attach buffer pool to the network interface as described by the user*/
+       memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
        bpool_cfg.num_dpbp = 1;
        bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
        bpool_cfg.pools[0].backup_pool = 0;
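
For context, the hunks above rework dpaa2_setup_flow_dist() to operate per traffic class and add the PMD-specific rte_pmd_dpaa2_set_custom_hash() API. Below is a minimal usage sketch, assuming the DPAA2 port is already configured with several Rx queues; the offset and size values are illustrative only and are not taken from this patch.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_pmd_dpaa2.h>

static int
enable_custom_rx_hash(uint16_t port_id)
{
	/* Illustrative values: hash Rx traffic on 4 bytes starting at byte
	 * offset 26 of the received frame. The PMD builds a DPNI
	 * key-generation profile from this byte range and spreads flows
	 * across the port's configured Rx queues.
	 */
	int ret = rte_pmd_dpaa2_set_custom_hash(port_id, 26, 4);

	if (ret != 0)
		printf("port %u: custom hash setup failed: %d\n",
		       port_id, ret);
	return ret;
}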