net/ixgbe: support traffic manager node delete, type/capability get and hierarchy commit
[dpdk.git] / drivers / net / ixgbe / ixgbe_tm.c
index ab0df75..cdcf45c 100644 (file)
@@ -50,12 +50,32 @@ static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                          uint32_t weight, uint32_t level_id,
                          struct rte_tm_node_params *params,
                          struct rte_tm_error *error);
+static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+                            struct rte_tm_error *error);
+static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+                              int *is_leaf, struct rte_tm_error *error);
+static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+                                       uint32_t level_id,
+                                       struct rte_tm_level_capabilities *cap,
+                                       struct rte_tm_error *error);
+static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+                                      uint32_t node_id,
+                                      struct rte_tm_node_capabilities *cap,
+                                      struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+                                 int clear_on_fail,
+                                 struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
        .capabilities_get = ixgbe_tm_capabilities_get,
        .shaper_profile_add = ixgbe_shaper_profile_add,
        .shaper_profile_delete = ixgbe_shaper_profile_del,
        .node_add = ixgbe_node_add,
+       .node_delete = ixgbe_node_delete,
+       .node_type_get = ixgbe_node_type_get,
+       .level_capabilities_get = ixgbe_level_capabilities_get,
+       .node_capabilities_get = ixgbe_node_capabilities_get,
+       .hierarchy_commit = ixgbe_hierarchy_commit,
 };
 
 int
@@ -737,3 +757,287 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 
        return 0;
 }
+
+static int
+ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+                 struct rte_tm_error *error)
+{
+       struct ixgbe_tm_conf *tm_conf =
+               IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+       enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+       struct ixgbe_tm_node *tm_node;
+
+       if (!error)
+               return -EINVAL;
+
+       /* reject changes once the hierarchy has been committed */
+       if (tm_conf->committed) {
+               error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+               error->message = "already committed";
+               return -EINVAL;
+       }
+
+       if (node_id == RTE_TM_NODE_ID_NULL) {
+               error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+               error->message = "invalid node id";
+               return -EINVAL;
+       }
+
+       /* check if the node id exists */
+       tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+       if (!tm_node) {
+               error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+               error->message = "no such node";
+               return -EINVAL;
+       }
+
+       /* refuse to delete a node that still has children */
+       if (tm_node->reference_count) {
+               error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+               error->message =
+                       "cannot delete a node which has children";
+               return -EINVAL;
+       }
+
+       /* root node: drop its shaper reference and clear the root */
+       if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
+               tm_node->shaper_profile->reference_count--;
+               rte_free(tm_node);
+               tm_conf->root = NULL;
+               return 0;
+       }
+
+       /* TC or queue node: unlink from its list and drop references */
+       tm_node->shaper_profile->reference_count--;
+       tm_node->parent->reference_count--;
+       if (node_type == IXGBE_TM_NODE_TYPE_TC) {
+               TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+               tm_conf->nb_tc_node--;
+       } else {
+               TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+               tm_conf->nb_queue_node--;
+       }
+       rte_free(tm_node);
+
+       return 0;
+}
+
+static int
+ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+                   int *is_leaf, struct rte_tm_error *error)
+{
+       enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+       struct ixgbe_tm_node *tm_node;
+
+       if (!is_leaf || !error)
+               return -EINVAL;
+
+       if (node_id == RTE_TM_NODE_ID_NULL) {
+               error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+               error->message = "invalid node id";
+               return -EINVAL;
+       }
+
+       /* look up the node; fail if the id is unknown */
+       tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+       if (!tm_node) {
+               error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+               error->message = "no such node";
+               return -EINVAL;
+       }
+
+       if (node_type == IXGBE_TM_NODE_TYPE_QUEUE)
+               *is_leaf = true;
+       else
+               *is_leaf = false;
+
+       return 0;
+}
+
+static int
+ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+                            uint32_t level_id,
+                            struct rte_tm_level_capabilities *cap,
+                            struct rte_tm_error *error)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (!cap || !error)
+               return -EINVAL;
+
+       if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
+               error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+               error->message = "too deep level";
+               return -EINVAL;
+       }
+
+       /* port (root) level: a single non-leaf node */
+       if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
+               cap->n_nodes_max = 1;
+               cap->n_nodes_nonleaf_max = 1;
+               cap->n_nodes_leaf_max = 0;
+               cap->non_leaf_nodes_identical = true;
+               cap->leaf_nodes_identical = true;
+               cap->nonleaf.shaper_private_supported = true;
+               cap->nonleaf.shaper_private_dual_rate_supported = false;
+               cap->nonleaf.shaper_private_rate_min = 0;
+               /* 10Gbps -> 1.25GBps */
+               cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+               cap->nonleaf.shaper_shared_n_max = 0;
+               cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               cap->nonleaf.sched_sp_n_priorities_max = 1;
+               cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+               cap->nonleaf.sched_wfq_n_groups_max = 0;
+               cap->nonleaf.sched_wfq_weight_max = 1;
+               cap->nonleaf.stats_mask = 0;
+
+               return 0;
+       }
+
+       /* TC or queue level */
+       if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+               /* TC */
+               cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               cap->n_nodes_leaf_max = 0;
+               cap->non_leaf_nodes_identical = true;
+       } else {
+               /* queue */
+               cap->n_nodes_max = hw->mac.max_tx_queues;
+               cap->n_nodes_nonleaf_max = 0;
+               cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+               cap->non_leaf_nodes_identical = true;
+       }
+       cap->leaf_nodes_identical = true;
+       cap->leaf.shaper_private_supported = true;
+       cap->leaf.shaper_private_dual_rate_supported = false;
+       cap->leaf.shaper_private_rate_min = 0;
+       /* 10Gbps -> 1.25GBps */
+       cap->leaf.shaper_private_rate_max = 1250000000ull;
+       cap->leaf.shaper_shared_n_max = 0;
+       cap->leaf.cman_head_drop_supported = false;
+       cap->leaf.cman_wred_context_private_supported = true;
+       cap->leaf.cman_wred_context_shared_n_max = 0;
+       cap->leaf.stats_mask = 0;
+
+       return 0;
+}
+
+static int
+ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+                           uint32_t node_id,
+                           struct rte_tm_node_capabilities *cap,
+                           struct rte_tm_error *error)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+       struct ixgbe_tm_node *tm_node;
+
+       if (!cap || !error)
+               return -EINVAL;
+
+       if (node_id == RTE_TM_NODE_ID_NULL) {
+               error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+               error->message = "invalid node id";
+               return -EINVAL;
+       }
+
+       /* look up the node; fail if the id is unknown */
+       tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+       if (!tm_node) {
+               error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+               error->message = "no such node";
+               return -EINVAL;
+       }
+
+       cap->shaper_private_supported = true;
+       cap->shaper_private_dual_rate_supported = false;
+       cap->shaper_private_rate_min = 0;
+       /* 10Gbps -> 1.25GBps */
+       cap->shaper_private_rate_max = 1250000000ull;
+       cap->shaper_shared_n_max = 0;
+
+       if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
+               cap->leaf.cman_head_drop_supported = false;
+               cap->leaf.cman_wred_context_private_supported = true;
+               cap->leaf.cman_wred_context_shared_n_max = 0;
+       } else {
+               if (node_type == IXGBE_TM_NODE_TYPE_PORT)
+                       cap->nonleaf.sched_n_children_max =
+                               IXGBE_DCB_MAX_TRAFFIC_CLASS;
+               else
+                       cap->nonleaf.sched_n_children_max =
+                               hw->mac.max_tx_queues;
+               cap->nonleaf.sched_sp_n_priorities_max = 1;
+               cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+               cap->nonleaf.sched_wfq_n_groups_max = 0;
+               cap->nonleaf.sched_wfq_weight_max = 1;
+       }
+
+       cap->stats_mask = 0;
+
+       return 0;
+}
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+                      int clear_on_fail,
+                      struct rte_tm_error *error)
+{
+       struct ixgbe_tm_conf *tm_conf =
+               IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+       struct ixgbe_tm_node *tm_node;
+       uint64_t bw;
+       int ret;
+
+       if (!error)
+               return -EINVAL;
+
+       /* nothing to apply when no root node has been added */
+       if (!tm_conf->root)
+               goto done;
+
+       /* port max bandwidth is not supported yet */
+       if (tm_conf->root->shaper_profile->profile.peak.rate) {
+               error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+               error->message = "no port max bandwidth";
+               goto fail_clear;
+       }
+
+       /* HW does not support TC max bandwidth */
+       TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+               if (tm_node->shaper_profile->profile.peak.rate) {
+                       error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+                       error->message = "no TC max bandwidth";
+                       goto fail_clear;
+               }
+       }
+
+       /* apply queue max bandwidth via per-queue rate limit */
+       TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+               bw = tm_node->shaper_profile->profile.peak.rate;
+               if (bw) {
+                       /* convert bytes/s to Mbit/s for the HW API */
+                       bw = bw * 8 / 1000 / 1000;
+                       ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+                       if (ret) {
+                               error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+                               error->message =
+                                       "failed to set queue max bandwidth";
+                               goto fail_clear;
+                       }
+               }
+       }
+
+done:
+       tm_conf->committed = true;
+       return 0;
+
+fail_clear:
+       /* clear all the traffic manager configuration */
+       if (clear_on_fail) {
+               ixgbe_tm_conf_uninit(dev);
+               ixgbe_tm_conf_init(dev);
+       }
+       return -EINVAL;
+}