X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_tm.c;h=73845a73d6ea14a4a6cb99ac3835b95926157a8f;hb=21e59432dfe0dc9bbcaec3d9d80c372834d6264b;hp=0e1ccf02850b5d48a81caa0d36e2f9509f8a5861;hpb=850dba2c5f41a28224d2ec4364e073805f3a6cd1;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 0e1ccf0285..73845a73d6 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -54,6 +25,17 @@ static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 			     struct rte_tm_error *error);
 static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 			       int *is_leaf, struct rte_tm_error *error);
+static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+					uint32_t level_id,
+					struct rte_tm_level_capabilities *cap,
+					struct rte_tm_error *error);
+static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+				       uint32_t node_id,
+				       struct rte_tm_node_capabilities *cap,
+				       struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+				  int clear_on_fail,
+				  struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
 	.capabilities_get = ixgbe_tm_capabilities_get,
@@ -62,6 +44,9 @@ const struct rte_tm_ops ixgbe_tm_ops = {
 	.node_add = ixgbe_node_add,
 	.node_delete = ixgbe_node_delete,
 	.node_type_get = ixgbe_node_type_get,
+	.level_capabilities_get = ixgbe_level_capabilities_get,
+	.node_capabilities_get = ixgbe_node_capabilities_get,
+	.hierarchy_commit = ixgbe_hierarchy_commit,
 };
 
 int
@@ -298,7 +283,7 @@ ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
 	if (!shaper_profile)
 		return -ENOMEM;
 	shaper_profile->shaper_profile_id = shaper_profile_id;
-	(void)rte_memcpy(&shaper_profile->profile, profile,
+	rte_memcpy(&shaper_profile->profile, profile,
 		   sizeof(struct rte_tm_shaper_params));
 	TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
 			  shaper_profile, node);
@@ -468,7 +453,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 }
 
 static int
-ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+ixgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
 		       uint32_t priority, uint32_t weight,
 		       struct rte_tm_node_params *params,
 		       struct rte_tm_error *error)
@@ -503,8 +488,8 @@ ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
 		return -EINVAL;
 	}
 
-	/* for root node */
-	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+	/* for non-leaf node */
+	if (node_id >= dev->data->nb_tx_queues) {
 		/* check the unsupported parameters */
 		if (params->nonleaf.wfq_weight_mode) {
 			error->type =
@@ -528,7 +513,7 @@ ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
 		return 0;
 	}
 
-	/* for TC or queue node */
+	/* for leaf node */
 	/* check the unsupported parameters */
 	if (params->leaf.cman) {
 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
@@ -574,7 +559,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
 	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
 	enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
-	struct ixgbe_tm_shaper_profile *shaper_profile;
+	struct ixgbe_tm_shaper_profile *shaper_profile = NULL;
 	struct ixgbe_tm_node *tm_node;
 	struct ixgbe_tm_node *parent_node;
 	uint8_t nb_tcs;
@@ -592,7 +577,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		return -EINVAL;
 	}
 
-	ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
+	ret = ixgbe_node_param_check(dev, node_id, priority, weight,
 				     params, error);
 	if (ret)
 		return ret;
@@ -605,12 +590,15 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	}
 
 	/* check the shaper profile id */
-	shaper_profile = ixgbe_shaper_profile_search(dev,
-						     params->shaper_profile_id);
-	if (!shaper_profile) {
-		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
-		error->message = "shaper profile not exist";
-		return -EINVAL;
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = ixgbe_shaper_profile_search(
+					dev, params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile not exist";
+			return -EINVAL;
+		}
+	}
 	}
 
 	/* root node if not have a parent */
@@ -643,12 +631,13 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		tm_node->no = 0;
 		tm_node->parent = NULL;
 		tm_node->shaper_profile = shaper_profile;
-		(void)rte_memcpy(&tm_node->params, params,
+		rte_memcpy(&tm_node->params, params,
 			   sizeof(struct rte_tm_node_params));
 		tm_conf->root = tm_node;
 
 		/* increase the reference counter of the shaper profile */
-		shaper_profile->reference_count++;
+		if (shaper_profile)
+			shaper_profile->reference_count++;
 
 		return 0;
 	}
@@ -723,7 +712,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
 	tm_node->shaper_profile = shaper_profile;
-	(void)rte_memcpy(&tm_node->params, params,
+	rte_memcpy(&tm_node->params, params,
 		   sizeof(struct rte_tm_node_params));
 	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
 		tm_node->no = parent_node->reference_count;
@@ -739,7 +728,8 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->parent->reference_count++;
 
 	/* increase the reference counter of the shaper profile */
-	shaper_profile->reference_count++;
+	if (shaper_profile)
+		shaper_profile->reference_count++;
 
 	return 0;
 }
@@ -787,14 +777,16 @@ ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 
 	/* root node */
 	if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
-		tm_node->shaper_profile->reference_count--;
+		if (tm_node->shaper_profile)
+			tm_node->shaper_profile->reference_count--;
 		rte_free(tm_node);
 		tm_conf->root = NULL;
 		return 0;
 	}
 
 	/* TC or queue node */
-	tm_node->shaper_profile->reference_count--;
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count--;
 	tm_node->parent->reference_count--;
 	if (node_type == IXGBE_TM_NODE_TYPE_TC) {
 		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
@@ -839,3 +831,201 @@ ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
 
 	return 0;
 }
+
+static int
+ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+			     uint32_t level_id,
+			     struct rte_tm_level_capabilities *cap,
+			     struct rte_tm_error *error)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "too deep level";
+		return -EINVAL;
+	}
+
+	/* root node */
+	if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->n_nodes_leaf_max = 0;
+	} else if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+		/* TC */
+		cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+		cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+		cap->n_nodes_leaf_max = 0;
+	} else {
+		/* queue */
+		cap->n_nodes_max = hw->mac.max_tx_queues;
+		cap->n_nodes_nonleaf_max = 0;
+		cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+	}
+
+	cap->non_leaf_nodes_identical = true;
+	cap->leaf_nodes_identical = true;
+
+	if (level_id != IXGBE_TM_NODE_TYPE_QUEUE) {
+		cap->nonleaf.shaper_private_supported = true;
+		cap->nonleaf.shaper_private_dual_rate_supported = false;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		/* 10Gbps -> 1.25GBps */
+		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+		cap->nonleaf.shaper_shared_n_max = 0;
+		if (level_id == IXGBE_TM_NODE_TYPE_PORT)
+			cap->nonleaf.sched_n_children_max =
+				IXGBE_DCB_MAX_TRAFFIC_CLASS;
+		else
+			cap->nonleaf.sched_n_children_max =
+				hw->mac.max_tx_queues;
+		cap->nonleaf.sched_sp_n_priorities_max = 1;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.stats_mask = 0;
+
+		return 0;
+	}
+
+	/* queue node */
+	cap->leaf.shaper_private_supported = true;
+	cap->leaf.shaper_private_dual_rate_supported = false;
+	cap->leaf.shaper_private_rate_min = 0;
+	/* 10Gbps -> 1.25GBps */
+	cap->leaf.shaper_private_rate_max = 1250000000ull;
+	cap->leaf.shaper_shared_n_max = 0;
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = true;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+	cap->leaf.stats_mask = 0;
+
+	return 0;
+}
+
+static int
+ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+			    uint32_t node_id,
+			    struct rte_tm_node_capabilities *cap,
+			    struct rte_tm_error *error)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+	struct ixgbe_tm_node *tm_node;
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	/* check if the node id exists */
+	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	cap->shaper_private_supported = true;
+	cap->shaper_private_dual_rate_supported = false;
+	cap->shaper_private_rate_min = 0;
+	/* 10Gbps -> 1.25GBps */
+	cap->shaper_private_rate_max = 1250000000ull;
+	cap->shaper_shared_n_max = 0;
+
+	if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
+		cap->leaf.cman_head_drop_supported = false;
+		cap->leaf.cman_wred_context_private_supported = true;
+		cap->leaf.cman_wred_context_shared_n_max = 0;
+	} else {
+		if (node_type == IXGBE_TM_NODE_TYPE_PORT)
+			cap->nonleaf.sched_n_children_max =
+				IXGBE_DCB_MAX_TRAFFIC_CLASS;
+		else
+			cap->nonleaf.sched_n_children_max =
+				hw->mac.max_tx_queues;
+		cap->nonleaf.sched_sp_n_priorities_max = 1;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+	}
+
+	cap->stats_mask = 0;
+
+	return 0;
+}
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+		       int clear_on_fail,
+		       struct rte_tm_error *error)
+{
+	struct ixgbe_tm_conf *tm_conf =
+		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+	struct ixgbe_tm_node *tm_node;
+	uint64_t bw;
+	int ret;
+
+	if (!error)
+		return -EINVAL;
+
+	/* check the setting */
+	if (!tm_conf->root)
+		goto done;
+
+	/* not support port max bandwidth yet */
+	if (tm_conf->root->shaper_profile &&
+	    tm_conf->root->shaper_profile->profile.peak.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "no port max bandwidth";
+		goto fail_clear;
+	}
+
+	/* HW not support TC max bandwidth */
+	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+		if (tm_node->shaper_profile &&
+		    tm_node->shaper_profile->profile.peak.rate) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "no TC max bandwidth";
+			goto fail_clear;
+		}
+	}
+
+	/* queue max bandwidth */
+	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+		if (tm_node->shaper_profile)
+			bw = tm_node->shaper_profile->profile.peak.rate;
+		else
+			bw = 0;
+		if (bw) {
+			/* interpret Bps to Mbps */
+			bw = bw * 8 / 1000 / 1000;
+			ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+			if (ret) {
+				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+				error->message =
+					"failed to set queue max bandwidth";
+				goto fail_clear;
+			}
+		}
+	}
+
+done:
+	tm_conf->committed = true;
+	return 0;
+
+fail_clear:
+	/* clear all the traffic manager configuration */
+	if (clear_on_fail) {
+		ixgbe_tm_conf_uninit(dev);
+		ixgbe_tm_conf_init(dev);
+	}
+	return -EINVAL;
+}
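
For reference, the callbacks added above are reached through the generic rte_tm API rather than called directly. The sketch below is illustrative only and is not part of the patch: the helper name app_tm_setup, the node IDs, the single-TC layout and the 50 MB/s peak rate are arbitrary choices, RTE_TM_NODE_LEVEL_ID_ANY is assumed to be accepted by the driver, and the exact rte_tm prototypes (for instance the width of port_id) differ slightly between DPDK releases. The one driver-specific constraint it reflects is visible in this diff: a node is a leaf only when node_id < nb_tx_queues, so non-leaf IDs are taken from above that range, and non-leaf nodes may now be added without a private shaper via RTE_TM_SHAPER_PROFILE_ID_NONE.

#include <rte_ethdev.h>
#include <rte_tm.h>

/* Illustrative node IDs: leaf (queue) node IDs must match Tx queue IDs,
 * so the non-leaf IDs are chosen well above the queue range.
 */
#define APP_ROOT_NODE_ID   1000000
#define APP_TC0_NODE_ID    1000001
#define APP_QUEUE0_NODE_ID 0

/* Assumes the port is already configured with at least one Tx queue. */
static int
app_tm_setup(uint16_t port_id)
{
	struct rte_tm_error err = { 0 };
	struct rte_tm_level_capabilities lcap = { 0 };
	struct rte_tm_shaper_params sp = { 0 };
	struct rte_tm_node_params np = { 0 };
	int ret;

	/* Query the queue level (level 2 in this driver: port/TC/queue) */
	ret = rte_tm_level_capabilities_get(port_id, 2, &lcap, &err);
	if (ret)
		return ret;

	/* 50 MB/s peak; ixgbe_hierarchy_commit() converts Bps to Mbps,
	 * so this becomes a 400 Mbit/s rate limit on the queue.
	 */
	sp.peak.rate = 50 * 1000 * 1000;
	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
	if (ret)
		return ret;

	/* Port and TC nodes without a private shaper (profile ID NONE) */
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	ret = rte_tm_node_add(port_id, APP_ROOT_NODE_ID, RTE_TM_NODE_ID_NULL,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;
	ret = rte_tm_node_add(port_id, APP_TC0_NODE_ID, APP_ROOT_NODE_ID,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* Leaf node for Tx queue 0, rate limited by shaper profile 1 */
	np.shaper_profile_id = 1;
	ret = rte_tm_node_add(port_id, APP_QUEUE0_NODE_ID, APP_TC0_NODE_ID,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* Apply the hierarchy; clear_on_fail=1 asks the driver to wipe its
	 * TM configuration if programming the hardware fails.
	 */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}

Priority 0 and weight 1 are used throughout because the driver's parameter check rejects anything else; the port-level and TC-level shapers are left unset since ixgbe_hierarchy_commit() only programs per-queue rate limits.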