ethdev: add namespace

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 3a6369d..ac89760 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -62,6 +33,9 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
                                       uint32_t node_id,
                                       struct rte_tm_node_capabilities *cap,
                                       struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+                                 int clear_on_fail,
+                                 struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
        .capabilities_get = ixgbe_tm_capabilities_get,
@@ -72,6 +46,7 @@ const struct rte_tm_ops ixgbe_tm_ops = {
        .node_type_get = ixgbe_node_type_get,
        .level_capabilities_get = ixgbe_level_capabilities_get,
        .node_capabilities_get = ixgbe_node_capabilities_get,
+       .hierarchy_commit = ixgbe_hierarchy_commit,
 };
 
 int
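
With .hierarchy_commit wired into ixgbe_tm_ops above, an application reaches
the new callback through the generic rte_tm API. A minimal sketch, assuming an
already configured port (the helper name and the error handling are
illustrative, not part of this patch):

#include <stdio.h>
#include <stdint.h>
#include <rte_tm.h>

static int
commit_tm_hierarchy(uint16_t port_id)
{
        struct rte_tm_error error = { 0 };

        /* clear_on_fail = 1: ask the driver to drop the staged
         * configuration if the commit is rejected */
        int ret = rte_tm_hierarchy_commit(port_id, 1, &error);

        if (ret)
                printf("commit failed: %s\n",
                       error.message ? error.message : "unknown");
        return ret;
}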
@@ -144,14 +119,14 @@ ixgbe_tc_nb_get(struct rte_eth_dev *dev)
        uint8_t nb_tcs = 0;
 
        eth_conf = &dev->data->dev_conf;
-       if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+       if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
                nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-       } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+       } else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
                if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-                   ETH_32_POOLS)
-                       nb_tcs = ETH_4_TCS;
+                   RTE_ETH_32_POOLS)
+                       nb_tcs = RTE_ETH_4_TCS;
                else
-                       nb_tcs = ETH_8_TCS;
+                       nb_tcs = RTE_ETH_8_TCS;
        } else {
                nb_tcs = 1;
        }
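
The renamed mq_mode values checked above come straight from the application's
device configuration. A hedged sketch of the caller side with the new
RTE_ETH_-prefixed names (configure_vmdq_dcb() is a made-up helper):

#include <rte_ethdev.h>

static void
configure_vmdq_dcb(struct rte_eth_conf *conf)
{
        conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
        /* with 32 pools, ixgbe_tc_nb_get() above reports RTE_ETH_4_TCS */
        conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools = RTE_ETH_32_POOLS;
}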
@@ -193,12 +168,16 @@ ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
        cap->shaper_private_rate_min = 0;
        /* 10Gbps -> 1.25GBps */
        cap->shaper_private_rate_max = 1250000000ull;
+       cap->shaper_private_packet_mode_supported = 0;
+       cap->shaper_private_byte_mode_supported = 1;
        cap->shaper_shared_n_max = 0;
        cap->shaper_shared_n_nodes_per_shaper_max = 0;
        cap->shaper_shared_n_shapers_per_node_max = 0;
        cap->shaper_shared_dual_rate_n_max = 0;
        cap->shaper_shared_rate_min = 0;
        cap->shaper_shared_rate_max = 0;
+       cap->shaper_shared_packet_mode_supported = 0;
+       cap->shaper_shared_byte_mode_supported = 0;
        cap->sched_n_children_max = hw->mac.max_tx_queues;
        /**
         * HW supports SP, but there is no plan to support it now.
@@ -207,6 +186,8 @@ ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
        cap->sched_sp_n_priorities_max = 1;
        cap->sched_wfq_n_children_per_group_max = 0;
        cap->sched_wfq_n_groups_max = 0;
+       cap->sched_wfq_packet_mode_supported = 0;
+       cap->sched_wfq_byte_mode_supported = 0;
        /**
         * SW only supports fair round robin now.
         * So, all the nodes should have the same weight.
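
Since the port here only advertises byte-mode private shapers (packet mode is
reported as unsupported), a portable application should check the new flag
before building profiles. A small sketch using the generic query (the helper
name is illustrative):

#include <stdint.h>
#include <rte_tm.h>

static int
shaper_byte_mode_supported(uint16_t port_id)
{
        struct rte_tm_capabilities cap;
        struct rte_tm_error error;

        if (rte_tm_capabilities_get(port_id, &cap, &error) != 0)
                return 0;
        return cap.shaper_private_byte_mode_supported == 1;
}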
@@ -308,7 +289,7 @@ ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
        if (!shaper_profile)
                return -ENOMEM;
        shaper_profile->shaper_profile_id = shaper_profile_id;
-       (void)rte_memcpy(&shaper_profile->profile, profile,
+       rte_memcpy(&shaper_profile->profile, profile,
                         sizeof(struct rte_tm_shaper_params));
        TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
                          shaper_profile, node);
@@ -394,10 +375,10 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
        if (vf_num) {
                /* no DCB */
                if (nb_tcs == 1) {
-                       if (vf_num >= ETH_32_POOLS) {
+                       if (vf_num >= RTE_ETH_32_POOLS) {
                                *nb = 2;
                                *base = vf_num * 2;
-                       } else if (vf_num >= ETH_16_POOLS) {
+                       } else if (vf_num >= RTE_ETH_16_POOLS) {
                                *nb = 4;
                                *base = vf_num * 4;
                        } else {
@@ -411,7 +392,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
                }
        } else {
                /* VT off */
-               if (nb_tcs == ETH_8_TCS) {
+               if (nb_tcs == RTE_ETH_8_TCS) {
                        switch (tc_node_no) {
                        case 0:
                                *base = 0;
@@ -478,7 +459,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
 }
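
For reference, the non-DCB branch above sizes each VF pool by the pool count
and starts the PF's queue range right after the VF queues. A hypothetical
restatement of the two branches visible in the hunk (the remaining branch is
elided above and left out here as well):

#include <stdint.h>

static void
queues_per_vf_pool(uint16_t vf_num, uint16_t *base, uint16_t *nb)
{
        *base = 0;
        *nb = 0;

        if (vf_num >= 32) {                /* RTE_ETH_32_POOLS */
                *nb = 2;                   /* 2 Tx queues per pool */
                *base = vf_num * 2;        /* PF queues start after the VFs */
        } else if (vf_num >= 16) {         /* RTE_ETH_16_POOLS */
                *nb = 4;
                *base = vf_num * 4;
        }
        /* smaller pool counts: handled by the branch elided above */
}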
 
 static int
-ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+ixgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
                       uint32_t priority, uint32_t weight,
                       struct rte_tm_node_params *params,
                       struct rte_tm_error *error)
@@ -513,8 +494,8 @@ ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
                return -EINVAL;
        }
 
-       /* for root node */
-       if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+       /* for non-leaf node */
+       if (node_id >= dev->data->nb_tx_queues) {
                /* check the unsupported parameters */
                if (params->nonleaf.wfq_weight_mode) {
                        error->type =
@@ -538,7 +519,7 @@ ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
                return 0;
        }
 
-       /* for TC or queue node */
+       /* for leaf node */
        /* check the unsupported parameters */
        if (params->leaf.cman) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
@@ -584,7 +565,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
        enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
        enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
-       struct ixgbe_tm_shaper_profile *shaper_profile;
+       struct ixgbe_tm_shaper_profile *shaper_profile = NULL;
        struct ixgbe_tm_node *tm_node;
        struct ixgbe_tm_node *parent_node;
        uint8_t nb_tcs;
@@ -602,7 +583,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                return -EINVAL;
        }
 
-       ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
+       ret = ixgbe_node_param_check(dev, node_id, priority, weight,
                                     params, error);
        if (ret)
                return ret;
@@ -615,12 +596,15 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
        }
 
        /* check the shaper profile id */
-       shaper_profile = ixgbe_shaper_profile_search(dev,
-                                                    params->shaper_profile_id);
-       if (!shaper_profile) {
-               error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
-               error->message = "shaper profile not exist";
-               return -EINVAL;
+       if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+               shaper_profile = ixgbe_shaper_profile_search(
+                                       dev, params->shaper_profile_id);
+               if (!shaper_profile) {
+                       error->type =
+                               RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+                       error->message = "shaper profile does not exist";
+                       return -EINVAL;
+               }
        }
 
        /* root node if it does not have a parent */
@@ -653,12 +637,13 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                tm_node->no = 0;
                tm_node->parent = NULL;
                tm_node->shaper_profile = shaper_profile;
-               (void)rte_memcpy(&tm_node->params, params,
+               rte_memcpy(&tm_node->params, params,
                                 sizeof(struct rte_tm_node_params));
                tm_conf->root = tm_node;
 
                /* increase the reference counter of the shaper profile */
-               shaper_profile->reference_count++;
+               if (shaper_profile)
+                       shaper_profile->reference_count++;
 
                return 0;
        }
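
The RTE_TM_SHAPER_PROFILE_ID_NONE handling above means a root node can now be
created with no shaper attached. A minimal sketch through the generic API
(node id 1000 is an arbitrary value above the queue-id range, since the
reworked param check treats ids below nb_tx_queues as leaf nodes):

#include <string.h>
#include <stdint.h>
#include <rte_tm.h>

static int
add_unshaped_root(uint16_t port_id, struct rte_tm_error *error)
{
        struct rte_tm_node_params params;

        memset(&params, 0, sizeof(params));
        params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;

        /* RTE_TM_NODE_ID_NULL as parent marks this as the root */
        return rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL,
                               0 /* priority */, 1 /* weight */,
                               RTE_TM_NODE_LEVEL_ID_ANY, &params, error);
}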
@@ -680,7 +665,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
        }
        /* check level */
        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
-           level_id != parent_node_type + 1) {
+           level_id != (uint32_t)parent_node_type + 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "Wrong level";
                return -EINVAL;
@@ -733,7 +718,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        tm_node->shaper_profile = shaper_profile;
-       (void)rte_memcpy(&tm_node->params, params,
+       rte_memcpy(&tm_node->params, params,
                         sizeof(struct rte_tm_node_params));
        if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
                tm_node->no = parent_node->reference_count;
@@ -749,7 +734,8 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
        tm_node->parent->reference_count++;
 
        /* increase the reference counter of the shaper profile */
-       shaper_profile->reference_count++;
+       if (shaper_profile)
+               shaper_profile->reference_count++;
 
        return 0;
 }
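
Leaf nodes follow the same path; with the reworked ixgbe_node_param_check()
the node id alone decides leaf vs. non-leaf, so a queue node simply reuses its
queue id. A hedged sketch (tc_node_id and profile_id are assumed to have been
created beforehand):

#include <string.h>
#include <stdint.h>
#include <rte_tm.h>

static int
add_queue_node(uint16_t port_id, uint32_t queue_id, uint32_t tc_node_id,
               uint32_t profile_id, struct rte_tm_error *error)
{
        struct rte_tm_node_params params;

        memset(&params, 0, sizeof(params));
        params.shaper_profile_id = profile_id;

        /* ids below nb_tx_queues are leaf (queue) nodes */
        return rte_tm_node_add(port_id, queue_id, tc_node_id,
                               0 /* priority */, 1 /* weight */,
                               RTE_TM_NODE_LEVEL_ID_ANY, &params, error);
}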
@@ -797,14 +783,16 @@ ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 
        /* root node */
        if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
-               tm_node->shaper_profile->reference_count--;
+               if (tm_node->shaper_profile)
+                       tm_node->shaper_profile->reference_count--;
                rte_free(tm_node);
                tm_conf->root = NULL;
                return 0;
        }
 
        /* TC or queue node */
-       tm_node->shaper_profile->reference_count--;
+       if (tm_node->shaper_profile)
+               tm_node->shaper_profile->reference_count--;
        tm_node->parent->reference_count--;
        if (node_type == IXGBE_TM_NODE_TYPE_TC) {
                TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
@@ -872,45 +860,60 @@ ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
                cap->n_nodes_max = 1;
                cap->n_nodes_nonleaf_max = 1;
                cap->n_nodes_leaf_max = 0;
-               cap->non_leaf_nodes_identical = true;
-               cap->leaf_nodes_identical = true;
+       } else if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+               /* TC */
+               cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               cap->n_nodes_leaf_max = 0;
+       } else {
+               /* queue */
+               cap->n_nodes_max = hw->mac.max_tx_queues;
+               cap->n_nodes_nonleaf_max = 0;
+               cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+       }
+
+       cap->non_leaf_nodes_identical = true;
+       cap->leaf_nodes_identical = true;
+
+       if (level_id != IXGBE_TM_NODE_TYPE_QUEUE) {
                cap->nonleaf.shaper_private_supported = true;
                cap->nonleaf.shaper_private_dual_rate_supported = false;
                cap->nonleaf.shaper_private_rate_min = 0;
                /* 10Gbps -> 1.25GBps */
                cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+               cap->nonleaf.shaper_private_packet_mode_supported = 0;
+               cap->nonleaf.shaper_private_byte_mode_supported = 1;
                cap->nonleaf.shaper_shared_n_max = 0;
-               cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               cap->nonleaf.shaper_shared_packet_mode_supported = 0;
+               cap->nonleaf.shaper_shared_byte_mode_supported = 0;
+               if (level_id == IXGBE_TM_NODE_TYPE_PORT)
+                       cap->nonleaf.sched_n_children_max =
+                               IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               else
+                       cap->nonleaf.sched_n_children_max =
+                               hw->mac.max_tx_queues;
                cap->nonleaf.sched_sp_n_priorities_max = 1;
                cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
                cap->nonleaf.sched_wfq_n_groups_max = 0;
                cap->nonleaf.sched_wfq_weight_max = 1;
+               cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+               cap->nonleaf.sched_wfq_byte_mode_supported = 0;
                cap->nonleaf.stats_mask = 0;
 
                return 0;
        }
 
-       /* TC or queue node */
-       if (level_id == IXGBE_TM_NODE_TYPE_TC) {
-               /* TC */
-               cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
-               cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
-               cap->n_nodes_leaf_max = 0;
-               cap->non_leaf_nodes_identical = true;
-       } else {
-               /* queue */
-               cap->n_nodes_max = hw->mac.max_tx_queues;
-               cap->n_nodes_nonleaf_max = 0;
-               cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
-               cap->non_leaf_nodes_identical = true;
-       }
-       cap->leaf_nodes_identical = true;
+       /* queue node */
        cap->leaf.shaper_private_supported = true;
        cap->leaf.shaper_private_dual_rate_supported = false;
        cap->leaf.shaper_private_rate_min = 0;
        /* 10Gbps -> 1.25GBps */
        cap->leaf.shaper_private_rate_max = 1250000000ull;
+       cap->leaf.shaper_private_packet_mode_supported = 0;
+       cap->leaf.shaper_private_byte_mode_supported = 1;
        cap->leaf.shaper_shared_n_max = 0;
+       cap->leaf.shaper_shared_packet_mode_supported = 0;
+       cap->leaf.shaper_shared_byte_mode_supported = 0;
        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = true;
        cap->leaf.cman_wred_context_shared_n_max = 0;
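
The per-level limits set above can be inspected with the generic level query.
A short sketch, assuming the usual ixgbe ordering of port = 0, TC = 1,
queue = 2 (matching the IXGBE_TM_NODE_TYPE_* values used as level ids):

#include <stdio.h>
#include <stdint.h>
#include <rte_tm.h>

static void
dump_level_caps(uint16_t port_id)
{
        struct rte_tm_level_capabilities cap;
        struct rte_tm_error error;
        uint32_t lvl;

        for (lvl = 0; lvl < 3; lvl++) {
                if (rte_tm_level_capabilities_get(port_id, lvl,
                                                  &cap, &error) != 0)
                        break;
                printf("level %u: %u nodes max (%u leaf)\n", lvl,
                       cap.n_nodes_max, cap.n_nodes_leaf_max);
        }
}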
@@ -951,7 +954,11 @@ ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
        cap->shaper_private_rate_min = 0;
        /* 10Gbps -> 1.25GBps */
        cap->shaper_private_rate_max = 1250000000ull;
+       cap->shaper_private_packet_mode_supported = 0;
+       cap->shaper_private_byte_mode_supported = 1;
        cap->shaper_shared_n_max = 0;
+       cap->shaper_shared_packet_mode_supported = 0;
+       cap->shaper_shared_byte_mode_supported = 0;
 
        if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
                cap->leaf.cman_head_drop_supported = false;
@@ -968,9 +975,79 @@ ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
                cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
                cap->nonleaf.sched_wfq_n_groups_max = 0;
                cap->nonleaf.sched_wfq_weight_max = 1;
+               cap->nonleaf.sched_wfq_packet_mode_supported = 0;
+               cap->nonleaf.sched_wfq_byte_mode_supported = 0;
        }
 
        cap->stats_mask = 0;
 
        return 0;
 }
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+                      int clear_on_fail,
+                      struct rte_tm_error *error)
+{
+       struct ixgbe_tm_conf *tm_conf =
+               IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+       struct ixgbe_tm_node *tm_node;
+       uint64_t bw;
+       int ret;
+
+       if (!error)
+               return -EINVAL;
+
+       /* check the setting */
+       if (!tm_conf->root)
+               goto done;
+
+       /* port max bandwidth is not supported yet */
+       if (tm_conf->root->shaper_profile &&
+           tm_conf->root->shaper_profile->profile.peak.rate) {
+               error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+               error->message = "no port max bandwidth";
+               goto fail_clear;
+       }
+
+       /* the HW does not support TC max bandwidth */
+       TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+               if (tm_node->shaper_profile &&
+                   tm_node->shaper_profile->profile.peak.rate) {
+                       error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+                       error->message = "no TC max bandwidth";
+                       goto fail_clear;
+               }
+       }
+
+       /* queue max bandwidth */
+       TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+               if (tm_node->shaper_profile)
+                       bw = tm_node->shaper_profile->profile.peak.rate;
+               else
+                       bw = 0;
+               if (bw) {
+                       /* convert bytes/s to Mbit/s */
+                       bw = bw * 8 / 1000 / 1000;
+                       ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+                       if (ret) {
+                               error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+                               error->message =
+                                       "failed to set queue max bandwidth";
+                               goto fail_clear;
+                       }
+               }
+       }
+
+done:
+       tm_conf->committed = true;
+       return 0;
+
+fail_clear:
+       /* clear all the traffic manager configuration */
+       if (clear_on_fail) {
+               ixgbe_tm_conf_uninit(dev);
+               ixgbe_tm_conf_init(dev);
+       }
+       return -EINVAL;
+}
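
rte_tm shaper rates are expressed in bytes per second, while
ixgbe_set_queue_rate_limit() takes Mbit/s, hence the bw * 8 / 1000 / 1000
conversion above (e.g. peak.rate = 125000000 B/s becomes 1000 Mbps). A
minimal sketch of the profile an application would feed into this path
(helper name and ids are illustrative):

#include <string.h>
#include <stdint.h>
#include <rte_tm.h>

static int
add_queue_shaper(uint16_t port_id, uint32_t profile_id,
                 uint64_t bytes_per_sec, struct rte_tm_error *error)
{
        struct rte_tm_shaper_params profile;

        memset(&profile, 0, sizeof(profile));
        profile.peak.rate = bytes_per_sec;      /* bytes/s, per rte_tm */

        return rte_tm_shaper_profile_add(port_id, profile_id,
                                         &profile, error);
}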