X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fbase%2Fdpaa2_hw_dpni.c;h=25b1d2bb6d9f26b09c5363b12f2868fb9aae8bd5;hb=3b1477b78b31cfc857b69f128312e9964b70871c;hp=f7a8d9f2601500e07a5cfb39641472e96cd00242;hpb=131a75b6e4df60586103d71defb85dcf9f77fb17;p=dpdk.git

diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index f7a8d9f260..25b1d2bb6d 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP
+ *   Copyright 2016-2019 NXP
  *
  */
 
@@ -9,7 +9,7 @@
 #include 
 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -17,21 +17,23 @@
 #include 
 #include 
 
-#include 
+#include 
 #include 
 #include 
 
 #include "../dpaa2_ethdev.h"
 
-static int
+int
 dpaa2_distset_to_dpkg_profile_cfg(
         uint64_t req_dist_set,
         struct dpkg_profile_cfg *kg_cfg);
 
 int
-dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
-                      uint64_t req_dist_set)
+rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
+                              uint16_t offset,
+                              uint8_t size)
 {
+        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
         struct fsl_mc_io *dpni = priv->hw;
         struct dpni_rx_tc_dist_cfg tc_cfg;
@@ -39,38 +41,103 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
         void *p_params;
         int ret, tc_index = 0;
 
+        p_params = rte_zmalloc(
+                NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+        if (!p_params) {
+                DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
+                return -ENOMEM;
+        }
+
+        kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
+        kg_cfg.extracts[0].extract.from_data.offset = offset;
+        kg_cfg.extracts[0].extract.from_data.size = size;
+        kg_cfg.extracts[0].num_of_byte_masks = 0;
+        kg_cfg.num_extracts = 1;
+
+        ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
+        if (ret) {
+                DPAA2_PMD_ERR("Unable to prepare extract parameters");
+                rte_free(p_params);
+                return ret;
+        }
+
+        memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+        tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
+        tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
+        tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+        ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+                                  &tc_cfg);
+        rte_free(p_params);
+        if (ret) {
+                DPAA2_PMD_ERR(
+                        "Setting distribution for Rx failed with err: %d",
+                        ret);
+                return ret;
+        }
+
+        return 0;
+}
+
+int
+dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+                      uint64_t req_dist_set, int tc_index)
+{
+        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+        struct fsl_mc_io *dpni = priv->hw;
+        struct dpni_rx_dist_cfg tc_cfg;
+        struct dpkg_profile_cfg kg_cfg;
+        void *p_params;
+        int ret, tc_dist_queues;
+
+        /*TC distribution size is set with dist_queues or
+         * nb_rx_queues % dist_queues in order of TC priority index.
+         * Calculating dist size for this tc_index:-
+         */
+        tc_dist_queues = eth_dev->data->nb_rx_queues -
+                tc_index * priv->dist_queues;
+        if (tc_dist_queues <= 0) {
+                DPAA2_PMD_INFO("No distribution on TC%d", tc_index);
+                return 0;
+        }
+
+        if (tc_dist_queues > priv->dist_queues)
+                tc_dist_queues = priv->dist_queues;
+
         p_params = rte_malloc(
                 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
         if (!p_params) {
-                PMD_INIT_LOG(ERR, "Memory unavailable");
+                DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
                 return -ENOMEM;
         }
+
         memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
-        memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+        memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
 
         ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
         if (ret) {
-                PMD_INIT_LOG(ERR, "given rss_hf (%lx) not supported",
-                             req_dist_set);
+                DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
+                              req_dist_set);
                 rte_free(p_params);
                 return ret;
         }
+
         tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
-        tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
-        tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+        tc_cfg.dist_size = tc_dist_queues;
+        tc_cfg.enable = true;
+        tc_cfg.tc = tc_index;
 
         ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
         if (ret) {
-                PMD_INIT_LOG(ERR, "Unable to prepare extract parameters");
+                DPAA2_PMD_ERR("Unable to prepare extract parameters");
                 rte_free(p_params);
                 return ret;
         }
 
-        ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
-                                  &tc_cfg);
+        ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
         rte_free(p_params);
         if (ret) {
-                PMD_INIT_LOG(ERR,
+                DPAA2_PMD_ERR(
                         "Setting distribution for Rx failed with err: %d",
                         ret);
                 return ret;
@@ -85,7 +152,7 @@ int dpaa2_remove_flow_dist(
 {
         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
         struct fsl_mc_io *dpni = priv->hw;
-        struct dpni_rx_tc_dist_cfg tc_cfg;
+        struct dpni_rx_dist_cfg tc_cfg;
         struct dpkg_profile_cfg kg_cfg;
         void *p_params;
         int ret;
@@ -93,34 +160,36 @@ int dpaa2_remove_flow_dist(
         p_params = rte_malloc(
                 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
         if (!p_params) {
-                PMD_INIT_LOG(ERR, "Memory unavailable");
+                DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
                 return -ENOMEM;
         }
-        memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
-        memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
-        kg_cfg.num_extracts = 0;
-        tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+
+        memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
         tc_cfg.dist_size = 0;
-        tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
+        tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+        tc_cfg.enable = true;
+        tc_cfg.tc = tc_index;
 
+        memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+        kg_cfg.num_extracts = 0;
         ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
         if (ret) {
-                PMD_INIT_LOG(ERR, "Unable to prepare extract parameters");
+                DPAA2_PMD_ERR("Unable to prepare extract parameters");
                 rte_free(p_params);
                 return ret;
         }
 
-        ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
-                                  &tc_cfg);
+        ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token,
+                                    &tc_cfg);
         rte_free(p_params);
         if (ret)
-                PMD_INIT_LOG(ERR,
-                        "Setting distribution for Rx failed with err:%d",
+                DPAA2_PMD_ERR(
+                        "Setting distribution for Rx failed with err: %d",
                         ret);
 
         return ret;
 }
 
-static int
+int
 dpaa2_distset_to_dpkg_profile_cfg(
         uint64_t req_dist_set,
         struct dpkg_profile_cfg *kg_cfg)
@@ -256,7 +325,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
                         break;
 
                 default:
-                        PMD_INIT_LOG(WARNING,
+                        DPAA2_PMD_WARN(
                                 "Unsupported flow dist option %x",
                                 dist_field);
                         return -EINVAL;
@@ -296,8 +365,10 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
                         DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                         DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
                         DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+                        DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
                         DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
 
+        layout.pass_timestamp = true;
         layout.pass_frame_status = 1;
         layout.private_data_size = DPAA2_FD_PTA_SIZE;
         layout.pass_parser_result = 1;
@@ -307,12 +378,13 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
         retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
                                          DPNI_QUEUE_RX, &layout);
         if (retcode) {
-                PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n",
+                DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
                              retcode);
                 return retcode;
         }
 
         /*Attach buffer pool to the network interface as described by the user*/
+        memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
         bpool_cfg.num_dpbp = 1;
         bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
         bpool_cfg.pools[0].backup_pool = 0;
@@ -322,9 +394,9 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
 
         retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
         if (retcode != 0) {
-                PMD_INIT_LOG(ERR, "Error in attaching the buffer pool list"
-                             " bpid = %d Error code = %d\n",
-                             bpool_cfg.pools[0].dpbp_id, retcode);
+                DPAA2_PMD_ERR("Error configuring buffer pool on interface."
+                              " bpid = %d error code = %d",
+                              bpool_cfg.pools[0].dpbp_id, retcode);
                 return retcode;
         }
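
Note (illustrative, not part of the patch above): the diff adds rte_pmd_dpaa2_set_custom_hash(), which extracts `size` bytes starting at byte `offset` of the received frame data and hashes them to spread flows across the port's Rx queues on TC0. The sketch below shows one possible call site; it assumes the declaration is exported through the dpaa2 PMD header rte_pmd_dpaa2.h, that the port is an already configured dpaa2 port, and the offset/size values are arbitrary examples.

/* Minimal usage sketch (illustrative only, not part of the patch).
 * Assumes rte_pmd_dpaa2.h declares rte_pmd_dpaa2_set_custom_hash() as added
 * by this change and that port_id refers to a configured dpaa2 port.
 */
#include <stdint.h>
#include <rte_pmd_dpaa2.h>

static int
enable_custom_rx_hash(uint16_t port_id)
{
        /* Hash 2 bytes located 12 bytes into the frame data; the values
         * here are examples only, chosen to match a field of interest.
         */
        return rte_pmd_dpaa2_set_custom_hash(port_id, 12, 2);
}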
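The comment added in dpaa2_setup_flow_dist() describes how the per-TC distribution size is derived: queues are handed out priv->dist_queues at a time in TC priority order, and the last TC that still gets queues receives whatever remains of nb_rx_queues. The following standalone sketch mirrors that arithmetic with a hypothetical helper (not part of the driver), so the split can be checked in isolation.

/* Hypothetical helper mirroring the tc_dist_queues calculation above:
 * each TC takes up to dist_queues queues, in TC priority order, until
 * nb_rx_queues is exhausted; later TCs get the remainder or nothing.
 */
#include <stdio.h>

static int
tc_dist_size(int nb_rx_queues, int dist_queues, int tc_index)
{
        int tc_dist_queues = nb_rx_queues - tc_index * dist_queues;

        if (tc_dist_queues <= 0)
                return 0;               /* no distribution on this TC */
        if (tc_dist_queues > dist_queues)
                tc_dist_queues = dist_queues;
        return tc_dist_queues;
}

int
main(void)
{
        /* e.g. 10 Rx queues, 4 dist queues per TC: TC0=4, TC1=4, TC2=2, TC3=0 */
        for (int tc = 0; tc < 4; tc++)
                printf("TC%d dist size = %d\n", tc, tc_dist_size(10, 4, tc));
        return 0;
}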