diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d1639d4..db5ac78 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020-2020 Hisilicon Limited.
+ * Copyright(c) 2020-2021 HiSilicon Limited.
  */
 
 #include <rte_malloc.h>
@@ -28,8 +28,12 @@ void
 hns3_tm_conf_init(struct rte_eth_dev *dev)
 {
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
 
+       if (!hns3_dev_tm_supported(hw))
+               return;
+
        pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
        pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
        pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;
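The early-return guards added in this and the following hunks rely on hns3_dev_tm_supported(), which is defined in the driver's headers rather than in this file. As a point of reference only, a device capability gate of this kind in the hns3 PMD is typically a bit test on hw->capability; the bit name in the sketch below is an assumption following the driver's naming convention, not something taken from this patch.

/* Illustrative sketch of the assumed capability gate; the real macro and
 * bit definition live in the hns3 driver headers.
 */
#define hns3_dev_tm_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TM_B)
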
@@ -50,9 +54,13 @@ void
 hns3_tm_conf_uninit(struct rte_eth_dev *dev)
 {
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_tm_shaper_profile *shaper_profile;
        struct hns3_tm_node *tm_node;
 
+       if (!hns3_dev_tm_supported(hw))
+               return;
+
        if (pf->tm_conf.nb_queue_node > 0) {
                while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
                        TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
@@ -126,33 +134,15 @@ hns3_tm_capabilities_get(struct rte_eth_dev *dev,
        cap->leaf_nodes_identical = 1;
        cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
        cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
-       cap->shaper_private_dual_rate_n_max = 0;
-       cap->shaper_private_rate_min = 0;
        cap->shaper_private_rate_max =
                hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
-       cap->shaper_shared_n_max = 0;
-       cap->shaper_shared_n_nodes_per_shaper_max = 0;
-       cap->shaper_shared_n_shapers_per_node_max = 0;
-       cap->shaper_shared_dual_rate_n_max = 0;
-       cap->shaper_shared_rate_min = 0;
-       cap->shaper_shared_rate_max = 0;
 
        cap->sched_n_children_max = max_tx_queues;
        cap->sched_sp_n_priorities_max = 1;
-       cap->sched_wfq_n_children_per_group_max = 0;
-       cap->sched_wfq_n_groups_max = 0;
        cap->sched_wfq_weight_max = 1;
 
-       cap->cman_head_drop_supported = 0;
-       cap->dynamic_update_mask = 0;
        cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
        cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
-       cap->cman_wred_context_n_max = 0;
-       cap->cman_wred_context_private_n_max = 0;
-       cap->cman_wred_context_shared_n_max = 0;
-       cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
-       cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
-       cap->stats_mask = 0;
 
        return 0;
 }
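The assignments dropped above only wrote zeroes into fields that are already zero, presumably because the function clears the whole capability structure before filling it in (the memset sits outside this hunk), so what applications observe is unchanged. From the application side the structure is reached through the generic traffic-management API; a minimal query sketch, with the port id chosen purely for illustration:

#include <inttypes.h>
#include <stdio.h>
#include <rte_tm.h>

static int
query_tm_caps(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error;
	int ret;

	ret = rte_tm_capabilities_get(port_id, &cap, &error);
	if (ret != 0)
		return ret;

	/* A few of the fields populated by the driver callback above. */
	printf("levels %u, nodes %u, max shaper rate %" PRIu64 " Bps\n",
	       cap.n_levels_max, cap.n_nodes_max, cap.shaper_private_rate_max);
	return 0;
}
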
@@ -200,6 +190,12 @@ hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
+               error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+               error->message = "peak rate must be at least 1Mbps";
+               return -EINVAL;
+       }
+
        if (profile->peak.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
                error->message = "peak bucket size not supported";
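The new lower bound rejects profiles whose peak rate is below hns3_tm_rate_convert_firmware2tm(1), i.e. 1 Mbit/s expressed in the byte-per-second units the rte_tm API uses. The conversion helpers are defined elsewhere in this file; the sketch below shows the conversion they are assumed to perform:

/* Assumed unit conversion: firmware rates are in Mbit/s, rte_tm shaper
 * rates are in bytes per second, so 1 Mbit/s corresponds to 125000 B/s.
 * Reproduced here for illustration only.
 */
static inline uint64_t
hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
{
	return (uint64_t)firmware_rate * (1000000ULL / 8);
}

static inline uint32_t
hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
{
	return (uint32_t)(tm_rate / (1000000ULL / 8));
}
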
@@ -379,7 +375,7 @@ hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
                return -EINVAL;
        }
 
-       if (params->leaf.cman) {
+       if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
                error->message = "congestion management not supported";
                return -EINVAL;
@@ -906,40 +902,39 @@ static int
 hns3_tm_config_port_rate(struct hns3_hw *hw,
                         struct hns3_tm_shaper_profile *shaper_profile)
 {
+       struct hns3_port_limit_rate_cmd *cfg;
+       struct hns3_cmd_desc desc;
        uint32_t firmware_rate;
        uint64_t rate;
+       int ret;
 
        if (shaper_profile) {
                rate = shaper_profile->profile.peak.rate;
                firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
        } else {
-               firmware_rate = hw->dcb_info.pg_info[0].bw_limit;
+               firmware_rate = hw->max_tm_rate;
        }
 
-       /*
-        * The TM shaper topology after device inited:
-        *     pri0 shaper   --->|
-        *     pri1 shaper   --->|
-        *     ...               |----> pg0 shaper ----> port shaper
-        *     ...               |
-        *     priX shaper   --->|
-        *
-        * Because port shaper rate maybe changed by firmware, to avoid
-        * concurrent configure, driver use pg0 shaper to achieve the rate limit
-        * of port.
-        *
-        * The finally port rate = MIN(pg0 shaper rate, port shaper rate)
-        */
-       return hns3_pg_shaper_rate_cfg(hw, 0, firmware_rate);
+       hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_LIMIT_RATE, false);
+       cfg = (struct hns3_port_limit_rate_cmd *)desc.data;
+       cfg->speed = rte_cpu_to_le_32(firmware_rate);
+
+       ret = hns3_cmd_send(hw, &desc, 1);
+       if (ret)
+               hns3_err(hw, "failed to config port rate, ret = %d", ret);
+
+       return ret;
 }
 
 static int
-hns3_tm_config_tc_rate(struct hns3_hw *hw,
-                      uint8_t tc_no,
+hns3_tm_config_tc_rate(struct hns3_hw *hw, uint8_t tc_no,
                       struct hns3_tm_shaper_profile *shaper_profile)
 {
+       struct hns3_tc_limit_rate_cmd *cfg;
+       struct hns3_cmd_desc desc;
        uint32_t firmware_rate;
        uint64_t rate;
+       int ret;
 
        if (shaper_profile) {
                rate = shaper_profile->profile.peak.rate;
@@ -948,7 +943,17 @@ hns3_tm_config_tc_rate(struct hns3_hw *hw,
                firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
        }
 
-       return hns3_pri_shaper_rate_cfg(hw, tc_no, firmware_rate);
+       hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_TC_LIMIT_RATE, false);
+       cfg = (struct hns3_tc_limit_rate_cmd *)desc.data;
+       cfg->speed = rte_cpu_to_le_32(firmware_rate);
+       cfg->tc_id = tc_no;
+
+       ret = hns3_cmd_send(hw, &desc, 1);
+       if (ret)
+               hns3_err(hw, "failed to config tc (%u) rate, ret = %d",
+                        tc_no, ret);
+
+       return ret;
 }
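Both rate helpers now build a firmware command directly instead of calling the pg0/pri shaper wrappers: the port limit goes through HNS3_OPC_TM_PORT_LIMIT_RATE (falling back to hw->max_tm_rate when no profile is attached, rather than the pg0 bandwidth limit), and each TC limit through HNS3_OPC_TM_TC_LIMIT_RATE. The command structures themselves are defined in the driver headers, not in this hunk; the sketch below shows the descriptor layout the casts rely on, with the two payload layouts given only as assumptions consistent with the fields the code writes.

/* Descriptor as consumed by hns3_cmd_send(): a small header plus a
 * 24-byte command-specific data area that the *_limit_rate_cmd
 * structures overlay.
 */
struct hns3_cmd_desc {
	uint16_t opcode;
	uint16_t flag;
	uint16_t retval;
	uint16_t rsv;
	uint32_t data[6];
};

struct hns3_port_limit_rate_cmd {	/* assumed layout */
	uint32_t speed;			/* port rate limit, Mbit/s */
	uint32_t rsvd[5];
};

struct hns3_tc_limit_rate_cmd {		/* assumed layout */
	uint32_t speed;			/* per-TC rate limit, Mbit/s */
	uint8_t tc_id;			/* traffic class index */
	uint8_t rsvd[19];
};
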
 
 static bool
@@ -1221,12 +1226,16 @@ static const struct rte_tm_ops hns3_tm_ops = {
 };
 
 int
-hns3_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
-               void *arg)
+hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
 {
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
        if (arg == NULL)
                return -EINVAL;
 
+       if (!hns3_dev_tm_supported(hw))
+               return -EOPNOTSUPP;
+
        *(const void **)arg = &hns3_tm_ops;
 
        return 0;
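hns3_tm_ops_get() is the hook the ethdev layer uses to resolve rte_tm calls, so the new -EOPNOTSUPP return makes every rte_tm operation report failure on hardware without TM support. A minimal application-side sketch of exercising the ops exposed here, not a complete hns3 hierarchy (TC and queue nodes are omitted) and with all ids chosen purely for illustration:

#include <rte_tm.h>

static int
limit_port_rate(uint16_t port_id)
{
	/* 1 Gbit/s peak rate in bytes per second; bucket size stays 0
	 * because the driver rejects a non-zero peak bucket size.
	 */
	struct rte_tm_shaper_profile_params sp = {
		.peak = { .rate = 1000000000ULL / 8 },
	};
	struct rte_tm_node_params np = { .shaper_profile_id = 1 };
	struct rte_tm_error err;
	int ret;

	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
	if (ret != 0)
		return ret;

	/* Port-level root node: no parent, priority 0, weight 1, level 0. */
	ret = rte_tm_node_add(port_id, 0, RTE_TM_NODE_ID_NULL, 0, 1, 0,
			      &np, &err);
	if (ret != 0)
		return ret;

	/* hns3_tm_dev_start_proc() warns at port start if this is skipped. */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}
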
@@ -1237,6 +1246,9 @@ hns3_tm_dev_start_proc(struct hns3_hw *hw)
 {
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
 
+       if (!hns3_dev_tm_supported(hw))
+               return;
+
        if (pf->tm_conf.root && !pf->tm_conf.committed)
                hns3_warn(hw,
                    "please call hierarchy_commit() before starting the port.");
@@ -1283,6 +1295,9 @@ hns3_tm_conf_update(struct hns3_hw *hw)
        struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
        struct rte_tm_error error;
 
+       if (!hns3_dev_tm_supported(hw))
+               return 0;
+
        if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
                return 0;