.. SPDX-License-Identifier: BSD-3-Clause
- Copyright 2016,2020 NXP
+ Copyright 2016,2020-2021 NXP
DPAA2 Poll Mode Driver
- Jumbo frames
- Link flow control
- Scattered and gather for TX and RX
+- :ref:`Traffic Management API <dptmapi>`
+
Supported DPAA2 SoCs
--------------------
- RSS hash key cannot be modified.
- RSS RETA cannot be configured.
+
+.. _dptmapi:
+
+Traffic Management API
+----------------------
+
+The DPAA2 PMD supports the generic DPDK Traffic Management API, which allows
+configuration of the following features:
+
+1. Hierarchical scheduling
+2. Traffic shaping
+
+Internally, TM is represented by a tree of nodes. A node which has a parent
+is called a leaf, whereas a node without a parent is called a non-leaf
+(root). In this driver's two-level model, every child of the root is a leaf.
+
+Nodes hold the following types of settings:
+
+- for egress scheduler configuration: weight
+- for egress rate limiter: private shaper
+
+The hierarchy is always constructed from the top, i.e. first the root node
+is added, then some number of leaf nodes. The number of leaf nodes cannot
+exceed the number of configured TX queues.
+
+After the hierarchy is complete, it can be committed.
+
+For an additional description please refer to DPDK :doc:`Traffic Management API <../prog_guide/traffic_management>`.
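+
+The hierarchy can also be constructed programmatically through the generic
+``rte_tm`` API. The following is a minimal sketch; the port id, node ids,
+rates and queue count are illustrative and error handling is omitted:
+
+.. code-block:: c
+
+   #include <rte_tm.h>
+
+   uint16_t port_id = 0;
+   struct rte_tm_error err;
+   struct rte_tm_shaper_params sp = {
+           .committed = { .rate = 104857600, .size = 64 },
+           .peak = { .rate = 104857600, .size = 64 },
+   };
+   struct rte_tm_node_params np = { .shaper_profile_id = 1 };
+
+   /* Port-level shaper profile and the root (port) node at level 0 */
+   rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
+   rte_tm_node_add(port_id, 8 /* root id */, RTE_TM_NODE_ID_NULL,
+                   0, 1, 0 /* level */, &np, &err);
+
+   /* One leaf node per configured TX queue at level 1 (strict priority) */
+   np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+   np.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
+   for (uint32_t q = 0; q < 4; q++)
+           rte_tm_node_add(port_id, q, 8 /* parent */, q /* priority */,
+                           1, 1 /* level */, &np, &err);
+
+   rte_tm_hierarchy_commit(port_id, 1 /* clear on fail */, &err);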
+
+Supported Features
+~~~~~~~~~~~~~~~~~~
+
+The following capabilities are supported:
+
+- Level0 (root node) and Level1 are supported.
+- 1 private shaper at root node (port level) is supported.
+- 8 TX queues per port are supported (1 channel per port).
+- Both SP and WFQ scheduling mechanisms are supported on all 8 queues.
+- Congestion notification is supported. This means that when there is
+  congestion on the network, the DPDK driver will not enqueue any packets
+  (no taildrop or WRED).
+
+Users can also check node and level capabilities using testpmd commands,
+for example:
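+
+.. code-block:: console
+
+   testpmd> show port tm cap 0
+   testpmd> show port tm level cap 0 0
+   testpmd> show port tm node cap 0 8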
+
+Usage example
+~~~~~~~~~~~~~
+
+For a detailed usage description please refer to "Traffic Management" section in DPDK :doc:`Testpmd Runtime Functions <../testpmd_app_ug/testpmd_funcs>`.
+
+1. Run testpmd as follows:
+
+ .. code-block:: console
+
+ ./dpdk-testpmd -c 0xf -n 1 -- -i --portmask 0x3 --nb-cores=1 --txq=4 --rxq=4
+
+2. Stop all ports:
+
+ .. code-block:: console
+
+ testpmd> port stop all
+
+3. Add shaper profile:
+
+ One port level shaper and strict priority on all 4 queues of port 0:
+
+ .. code-block:: console
+
+ add port tm node shaper profile 0 1 104857600 64 100 0 0
+ add port tm nonleaf node 0 8 -1 0 1 0 1 1 1 0
+ add port tm leaf node 0 0 8 0 1 1 -1 0 0 0 0
+ add port tm leaf node 0 1 8 1 1 1 -1 0 0 0 0
+ add port tm leaf node 0 2 8 2 1 1 -1 0 0 0 0
+ add port tm leaf node 0 3 8 3 1 1 -1 0 0 0 0
+ port tm hierarchy commit 0 no
+
+ or
+
+ One port level shaper and WFQ on all 4 queues of port 0:
+
+ .. code-block:: console
+
+ add port tm node shaper profile 0 1 104857600 64 100 0 0
+ add port tm nonleaf node 0 8 -1 0 1 0 1 1 1 0
+ add port tm leaf node 0 0 8 0 200 1 -1 0 0 0 0
+ add port tm leaf node 0 1 8 0 300 1 -1 0 0 0 0
+ add port tm leaf node 0 2 8 0 400 1 -1 0 0 0 0
+ add port tm leaf node 0 3 8 0 500 1 -1 0 0 0 0
+ port tm hierarchy commit 0 no
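+
+   In these commands, node 8 is the root (port-level) node and nodes 0-3 are
+   the TX queues. The two arguments following the parent node id are the
+   priority and the weight: distinct priorities give strict priority
+   scheduling, while equal priorities with individual weights select WFQ.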
+
+4. Create flows matching the source IP addresses:
+
+ .. code-block:: console
+
+ flow create 1 group 0 priority 1 ingress pattern ipv4 src is \
+ 10.10.10.1 / end actions queue index 0 / end
+ flow create 1 group 0 priority 2 ingress pattern ipv4 src is \
+ 10.10.10.2 / end actions queue index 1 / end
+ flow create 1 group 0 priority 3 ingress pattern ipv4 src is \
+ 10.10.10.3 / end actions queue index 2 / end
+ flow create 1 group 0 priority 4 ingress pattern ipv4 src is \
+ 10.10.10.4 / end actions queue index 3 / end
+
+5. Start all ports:
+
+ .. code-block:: console
+
+ testpmd> port start all
+
+6. Enable forwarding:
+
+ .. code-block:: console
+
+ testpmd> start
+
+7. Inject traffic on port 1 matching the configured flows. You will see
+   shaped and scheduled forwarded traffic on port 0.
/* * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2021 NXP
*
*/
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ dpaa2_tm_init(dev);
+
return 0;
}
return -1;
}
+ dpaa2_tm_deinit(dev);
dpaa2_flow_clean(dev);
/* Clean the device first */
ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
qinfo->conf.tx_deferred_start = 0;
}
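+/* rte_tm entry point: generic rte_tm_* calls retrieve the driver's TM ops
+ * through this eth_dev_ops callback.
+ */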
+static int
+dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
+{
+ *(const void **)ops = &dpaa2_tm_ops;
+
+ return 0;
+}
+
static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_configure = dpaa2_eth_dev_configure,
.dev_start = dpaa2_dev_start,
.filter_ctrl = dpaa2_dev_flow_ctrl,
.rxq_info_get = dpaa2_rxq_info_get,
.txq_info_get = dpaa2_txq_info_get,
+ .tm_ops_get = dpaa2_tm_ops_get,
#if defined(RTE_LIBRTE_IEEE1588)
.timesync_enable = dpaa2_timesync_enable,
.timesync_disable = dpaa2_timesync_disable,
#include <rte_pmd_dpaa2.h>
#include <dpaa2_hw_pvt.h>
+#include "dpaa2_tm.h"
#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>
extern const struct rte_flow_ops dpaa2_flow_ops;
extern enum rte_filter_type dpaa2_filter_type;
+extern const struct rte_tm_ops dpaa2_tm_ops;
+
#define IP_ADDRESS_OFFSET_INVALID (-1)
struct dpaa2_key_info {
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
+ LIST_HEAD(nodes, dpaa2_tm_node) nodes;
+ LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
};
int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2020 NXP
+ */
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_tm_driver.h>
+
+#include "dpaa2_ethdev.h"
+
+#define DPAA2_BURST_MAX (64 * 1024)
+
+#define DPAA2_SHAPER_MIN_RATE 0
+#define DPAA2_SHAPER_MAX_RATE 107374182400ull
+#define DPAA2_WEIGHT_MAX 24701
+
+int
+dpaa2_tm_init(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ LIST_INIT(&priv->shaper_profiles);
+ LIST_INIT(&priv->nodes);
+
+ return 0;
+}
+
+void dpaa2_tm_deinit(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_shaper_profile *profile =
+ LIST_FIRST(&priv->shaper_profiles);
+ struct dpaa2_tm_node *node = LIST_FIRST(&priv->nodes);
+
+ while (profile) {
+ struct dpaa2_tm_shaper_profile *next = LIST_NEXT(profile, next);
+
+ LIST_REMOVE(profile, next);
+ rte_free(profile);
+ profile = next;
+ }
+
+ while (node) {
+ struct dpaa2_tm_node *next = LIST_NEXT(node, next);
+
+ LIST_REMOVE(node, next);
+ rte_free(node);
+ node = next;
+ }
+}
+
+static struct dpaa2_tm_node *
+dpaa2_node_from_id(struct dpaa2_dev_priv *priv, uint32_t node_id)
+{
+ struct dpaa2_tm_node *node;
+
+ LIST_FOREACH(node, &priv->nodes, next)
+ if (node->id == node_id)
+ return node;
+
+ return NULL;
+}
+
+static int
+dpaa2_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ if (!cap)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Capabilities are NULL\n");
+
+ memset(cap, 0, sizeof(*cap));
+
+	/* root node (port) + number of txqs, assuming each TX
+	 * queue is mapped to a dedicated TC
+	 */
+ cap->n_nodes_max = 1 + dev->data->nb_tx_queues;
+ cap->n_levels_max = 2; /* port level + txqs level */
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+
+ cap->shaper_n_max = 1;
+ cap->shaper_private_n_max = 1;
+ cap->shaper_private_dual_rate_n_max = 1;
+ cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+
+ cap->sched_n_children_max = dev->data->nb_tx_queues;
+ cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
+ cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
+ cap->sched_wfq_n_groups_max = 2;
+ cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+
+ cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_STATS;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+
+ return 0;
+}
+
+static int
+dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ if (!cap)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ memset(cap, 0, sizeof(*cap));
+
+ if (level_id > 1)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL, "Wrong level id\n");
+
+ if (level_id == 0) { /* Root node */
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = 1;
+ cap->nonleaf.shaper_private_dual_rate_supported = 1;
+ cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+
+ cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ dev->data->nb_tx_queues;
+ cap->nonleaf.sched_wfq_n_groups_max = 2;
+ cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+ cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+ } else { /* leaf nodes */
+ cap->n_nodes_max = dev->data->nb_tx_queues;
+ cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
+ cap->leaf_nodes_identical = 1;
+
+ cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct dpaa2_tm_node *node;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ if (!cap)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ memset(cap, 0, sizeof(*cap));
+
+ node = dpaa2_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (node->type == 0) {
+ cap->shaper_private_supported = 1;
+
+ cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ dev->data->nb_tx_queues;
+ cap->nonleaf.sched_wfq_n_groups_max = 2;
+ cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ } else {
+ cap->stats_mask = RTE_TM_STATS_N_PKTS;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
+ struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_node *node;
+
+ if (!is_leaf)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ node = dpaa2_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ *is_leaf = node->type == 1/*NODE_QUEUE*/ ? 1 : 0;
+
+ return 0;
+}
+
+static struct dpaa2_tm_shaper_profile *
+dpaa2_shaper_profile_from_id(struct dpaa2_dev_priv *priv,
+ uint32_t shaper_profile_id)
+{
+ struct dpaa2_tm_shaper_profile *profile;
+
+ LIST_FOREACH(profile, &priv->shaper_profiles, next)
+ if (profile->id == shaper_profile_id)
+ return profile;
+
+ return NULL;
+}
+
+static int
+dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *params,
+ struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_shaper_profile *profile;
+
+ if (!params)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+	if (params->committed.rate > DPAA2_SHAPER_MAX_RATE)
+		return -rte_tm_error_set(error, EINVAL,
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+				NULL, "Committed rate is out of range\n");
+
+	if (params->committed.size > DPAA2_BURST_MAX)
+		return -rte_tm_error_set(error, EINVAL,
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
+				NULL, "Committed size is out of range\n");
+
+ if (params->peak.rate > DPAA2_SHAPER_MAX_RATE)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL, "Peak rate is out of range\n");
+
+ if (params->peak.size > DPAA2_BURST_MAX)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ NULL, "Peak size is out of range\n");
+
+ if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Wrong shaper profile id\n");
+
+ profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
+ if (profile)
+ return -rte_tm_error_set(error, EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Profile id already exists\n");
+
+ profile = rte_zmalloc_socket(NULL, sizeof(*profile), 0,
+ rte_socket_id());
+ if (!profile)
+ return -rte_tm_error_set(error, ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ profile->id = shaper_profile_id;
+ rte_memcpy(&profile->params, params, sizeof(profile->params));
+
+ LIST_INSERT_HEAD(&priv->shaper_profiles, profile, next);
+
+ return 0;
+}
+
+static int
+dpaa2_shaper_profile_delete(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_shaper_profile *profile;
+
+ profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
+ if (!profile)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Profile id does not exist\n");
+
+ if (profile->refcnt)
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Profile is used\n");
+
+ LIST_REMOVE(profile, next);
+ rte_free(profile);
+
+ return 0;
+}
+
+static int
+dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
+ __rte_unused uint32_t priority, uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL)
+		return -rte_tm_error_set(error, EINVAL,
+					 RTE_TM_ERROR_TYPE_NODE_ID,
+					 NULL, "Node id is invalid\n");
+
+ if (weight > DPAA2_WEIGHT_MAX)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL, "Weight is out of range\n");
+
+ if (level_id != 0 && level_id != 1)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL, "Wrong level id\n");
+
+ if (!params)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ if (params->shared_shaper_id)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
+ NULL, "Shared shaper is not supported\n");
+
+ if (params->n_shared_shapers)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL, "Shared shaper is not supported\n");
+
+ /* verify port (root node) settings */
+ if (node_id >= dev->data->nb_tx_queues) {
+ if (params->nonleaf.wfq_weight_mode)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
+ NULL, "WFQ weight mode is not supported\n");
+
+ if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
+
+ return 0;
+ }
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL, "Private shaper not supported on leaf\n");
+
+ if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested stats are not supported\n");
+
+ /* check leaf node */
+ if (level_id == 1) {
+ if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
+ NULL, "Only taildrop is supported\n");
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority, uint32_t weight,
+ uint32_t level_id, struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_shaper_profile *profile = NULL;
+ struct dpaa2_tm_node *node, *parent = NULL;
+ int ret;
+
+ if (0/* If device is started*/)
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is already started\n");
+
+ ret = dpaa2_node_check_params(dev, node_id, priority, weight, level_id,
+ params, error);
+ if (ret)
+ return ret;
+
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ profile = dpaa2_shaper_profile_from_id(priv,
+ params->shaper_profile_id);
+ if (!profile)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Shaper id does not exist\n");
+ }
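+	/* A NULL parent id denotes the root (port) node; only one may exist */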
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ LIST_FOREACH(node, &priv->nodes, next) {
+ if (node->type != 0 /*root node*/)
+ continue;
+
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Root node exists\n");
+ }
+ } else {
+ parent = dpaa2_node_from_id(priv, parent_node_id);
+ if (!parent)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL, "Parent node id not exist\n");
+ }
+
+ node = dpaa2_node_from_id(priv, node_id);
+	if (node)
+		return -rte_tm_error_set(error, EEXIST,
+					 RTE_TM_ERROR_TYPE_NODE_ID,
+					 NULL, "Node id already exists\n");
+
+ node = rte_zmalloc_socket(NULL, sizeof(*node), 0, rte_socket_id());
+ if (!node)
+ return -rte_tm_error_set(error, ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ node->id = node_id;
+ node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? 0/*NODE_PORT*/ :
+ 1/*NODE_QUEUE*/;
+
+ if (parent) {
+ node->parent = parent;
+ parent->refcnt++;
+ }
+
+ if (profile) {
+ node->profile = profile;
+ profile->refcnt++;
+ }
+
+ node->weight = weight;
+ node->priority = priority;
+ node->stats_mask = params->stats_mask;
+
+ LIST_INSERT_HEAD(&priv->nodes, node, next);
+
+ return 0;
+}
+
+static int
+dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_node *node;
+
+	if (0/* If device is started*/) {
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is already started\n");
+ }
+
+ node = dpaa2_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (node->refcnt)
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id is used\n");
+
+ if (node->parent)
+ node->parent->refcnt--;
+
+ if (node->profile)
+ node->profile->refcnt--;
+
+ LIST_REMOVE(node, next);
+ rte_free(node);
+
+ return 0;
+}
+
+static int
+dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_node *node, *temp_node;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ int ret;
+ int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ struct dpni_tx_priorities_cfg prio_cfg;
+
+ memset(&prio_cfg, 0, sizeof(prio_cfg));
+ memset(conf, 0, sizeof(conf));
+
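+	/*
+	 * Program the port shaper from the root node's profile, then group
+	 * leaf nodes that share a priority into at most two weighted (WFQ)
+	 * groups; the remaining leaves keep strict priority. conf[] marks
+	 * queues whose scheduling mode has already been decided.
+	 */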
+ LIST_FOREACH(node, &priv->nodes, next) {
+ if (node->type == 0/*root node*/) {
+ if (!node->profile)
+ continue;
+
+ struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;
+
+ tx_cr_shaper.max_burst_size =
+ node->profile->params.committed.size;
+ tx_cr_shaper.rate_limit =
+ node->profile->params.committed.rate / (1024 * 1024);
+ tx_er_shaper.max_burst_size =
+ node->profile->params.peak.size;
+ tx_er_shaper.rate_limit =
+ node->profile->params.peak.rate / (1024 * 1024);
+ ret = dpni_set_tx_shaping(dpni, 0, priv->token,
+ &tx_cr_shaper, &tx_er_shaper, 0);
+ if (ret) {
+ ret = -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
+ "Error in setting Shaping\n");
+ goto out;
+ }
+
+ continue;
+ } else { /* level 1, all leaf nodes */
+ if (node->id >= dev->data->nb_tx_queues) {
+ ret = -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID, NULL,
+ "Not enough txqs configured\n");
+ goto out;
+ }
+
+ if (conf[node->id])
+ continue;
+
+ LIST_FOREACH(temp_node, &priv->nodes, next) {
+ if (temp_node->id == node->id ||
+ temp_node->type == 0)
+ continue;
+ if (conf[temp_node->id])
+ continue;
+ if (node->priority == temp_node->priority) {
+ if (wfq_grp == 0) {
+ prio_cfg.tc_sched[temp_node->id].mode =
+ DPNI_TX_SCHED_WEIGHTED_A;
+					/* DPDK's lowest weight is 1 while
+					 * DPAA2 expects a minimum of 100,
+					 * hence the +99 offset
+					 */
+ prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
+ temp_node->weight + 99;
+ } else if (wfq_grp == 1) {
+ prio_cfg.tc_sched[temp_node->id].mode =
+ DPNI_TX_SCHED_WEIGHTED_B;
+ prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
+ temp_node->weight + 99;
+ } else {
+ /*TODO: add one more check for
+ * number of nodes in a group
+ */
+ ret = -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Only 2 WFQ Groups are supported\n");
+ goto out;
+ }
+ conf[temp_node->id] = 1;
+ is_wfq_grp = 1;
+ }
+ }
+ if (is_wfq_grp) {
+ if (wfq_grp == 0) {
+ prio_cfg.tc_sched[node->id].mode =
+ DPNI_TX_SCHED_WEIGHTED_A;
+ prio_cfg.tc_sched[node->id].delta_bandwidth =
+ node->weight + 99;
+ prio_cfg.prio_group_A = node->priority;
+ } else if (wfq_grp == 1) {
+ prio_cfg.tc_sched[node->id].mode =
+ DPNI_TX_SCHED_WEIGHTED_B;
+ prio_cfg.tc_sched[node->id].delta_bandwidth =
+ node->weight + 99;
+ prio_cfg.prio_group_B = node->priority;
+ }
+ wfq_grp++;
+ is_wfq_grp = 0;
+ }
+ conf[node->id] = 1;
+ }
+ if (wfq_grp)
+ prio_cfg.separate_groups = 1;
+ }
+ ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
+ if (ret) {
+ ret = -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Scheduling Failed\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ if (clear_on_fail) {
+ dpaa2_tm_deinit(dev);
+ dpaa2_tm_init(dev);
+ }
+
+ return ret;
+}
+
+const struct rte_tm_ops dpaa2_tm_ops = {
+ .node_type_get = dpaa2_node_type_get,
+ .capabilities_get = dpaa2_capabilities_get,
+ .level_capabilities_get = dpaa2_level_capabilities_get,
+ .node_capabilities_get = dpaa2_node_capabilities_get,
+ .shaper_profile_add = dpaa2_shaper_profile_add,
+ .shaper_profile_delete = dpaa2_shaper_profile_delete,
+ .node_add = dpaa2_node_add,
+ .node_delete = dpaa2_node_delete,
+ .hierarchy_commit = dpaa2_hierarchy_commit,
+};
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2020 NXP
+ */
+
+#ifndef _DPAA2_TM_H_
+#define _DPAA2_TM_H_
+
+#include <rte_tm.h>
+
+struct dpaa2_tm_shaper_profile {
+ LIST_ENTRY(dpaa2_tm_shaper_profile) next;
+ uint32_t id;
+ int refcnt;
+ struct rte_tm_shaper_params params;
+};
+
+struct dpaa2_tm_node {
+ LIST_ENTRY(dpaa2_tm_node) next;
+ uint32_t id;
+ uint32_t type;
+ int refcnt;
+ struct dpaa2_tm_node *parent;
+ struct dpaa2_tm_shaper_profile *profile;
+ uint32_t weight;
+ uint32_t priority;
+ uint64_t stats_mask;
+};
+
+int dpaa2_tm_init(struct rte_eth_dev *dev);
+void dpaa2_tm_deinit(struct rte_eth_dev *dev);
+
+#endif /* _DPAA2_TM_H_ */
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2020 NXP
*
*/
#include <fsl_mc_sys.h>
return 0;
}
+/**
+ * dpni_set_tx_shaping() - Set the transmit shaping
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tx_cr_shaper: TX committed rate shaping configuration
+ * @tx_er_shaper: TX excess rate shaping configuration
+ * @coupled: Committed and excess rate shapers are coupled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+ const struct dpni_tx_shaping_cfg *tx_er_shaper,
+ int coupled)
+{
+ struct dpni_cmd_set_tx_shaping *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
+ cmd_params->tx_cr_max_burst_size =
+ cpu_to_le16(tx_cr_shaper->max_burst_size);
+ cmd_params->tx_er_max_burst_size =
+ cpu_to_le16(tx_er_shaper->max_burst_size);
+ cmd_params->tx_cr_rate_limit =
+ cpu_to_le32(tx_cr_shaper->rate_limit);
+ cmd_params->tx_er_rate_limit =
+ cpu_to_le32(tx_er_shaper->rate_limit);
+ dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpni_set_max_frame_length() - Set the maximum received frame length.
* @mc_io: Pointer to MC portal's I/O object
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_set_tx_priorities() - Set transmission TC priority configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Transmission selection configuration
+ *
+ * warning: Allowed only when DPNI is disabled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_tx_priorities_cfg *cfg)
+{
+ struct dpni_cmd_set_tx_priorities *cmd_params;
+ struct mc_command cmd = { 0 };
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
+ dpni_set_field(cmd_params->flags,
+ SEPARATE_GRP,
+ cfg->separate_groups);
+ cmd_params->prio_group_A = cfg->prio_group_A;
+ cmd_params->prio_group_B = cfg->prio_group_B;
+
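+	/* each byte of 'modes' packs the 4-bit scheduling mode of two TCs */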
+ for (i = 0; i + 1 < DPNI_MAX_TC; i = i + 2) {
+ dpni_set_field(cmd_params->modes[i / 2],
+ MODE_1,
+ cfg->tc_sched[i].mode);
+ dpni_set_field(cmd_params->modes[i / 2],
+ MODE_2,
+ cfg->tc_sched[i + 1].mode);
+ }
+
+ for (i = 0; i < DPNI_MAX_TC; i++) {
+ cmd_params->delta_bandwidth[i] =
+ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
+ }
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
* @mc_io: Pointer to MC portal's I/O object
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class policing configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_policing_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_tc_policing *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_tc_policing *)cmd.params;
+ dpni_set_field(cmd_params->mode_color, COLOR, cfg->default_color);
+ dpni_set_field(cmd_params->mode_color, MODE, cfg->mode);
+ dpni_set_field(cmd_params->units, UNITS, cfg->units);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ cmd_params->cir = cpu_to_le32(cfg->cir);
+ cmd_params->cbs = cpu_to_le32(cfg->cbs);
+ cmd_params->eir = cpu_to_le32(cfg->eir);
+ cmd_params->ebs = cpu_to_le32(cfg->ebs);
+ cmd_params->tc_id = tc_id;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class policing configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ struct dpni_rx_tc_policing_cfg *cfg)
+{
+ struct dpni_rsp_get_rx_tc_policing *rsp_params;
+ struct dpni_cmd_get_rx_tc_policing *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_rx_tc_policing *)cmd.params;
+ cmd_params->tc_id = tc_id;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_rsp_get_rx_tc_policing *)cmd.params;
+ cfg->options = le32_to_cpu(rsp_params->options);
+ cfg->cir = le32_to_cpu(rsp_params->cir);
+ cfg->cbs = le32_to_cpu(rsp_params->cbs);
+ cfg->eir = le32_to_cpu(rsp_params->eir);
+ cfg->ebs = le32_to_cpu(rsp_params->ebs);
+ cfg->units = dpni_get_field(rsp_params->units, UNITS);
+ cfg->mode = dpni_get_field(rsp_params->mode_color, MODE);
+ cfg->default_color = dpni_get_field(rsp_params->mode_color, COLOR);
+
+ return 0;
+}
+
+/**
+ * dpni_prepare_early_drop() - prepare an early drop.
+ * @cfg: Early-drop configuration
+ * @early_drop_buf: Zeroed 256-byte memory buffer, to be mapped for DMA
+ *
+ * This function has to be called before dpni_set_early_drop()
+ *
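+ * A minimal usage sketch, given a filled struct dpni_early_drop_cfg cfg
+ * (buffer allocation and IOVA mapping are platform specific; iova_of()
+ * is a hypothetical helper):
+ *
+ *	uint8_t buf[256] = { 0 };
+ *	dpni_prepare_early_drop(&cfg, buf);
+ *	dpni_set_early_drop(mc_io, CMD_PRI_LOW, token, DPNI_QUEUE_RX,
+ *			    tc_id, iova_of(buf));
+ *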
+ */
+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
+ uint8_t *early_drop_buf)
+{
+ struct dpni_early_drop *ext_params;
+
+ ext_params = (struct dpni_early_drop *)early_drop_buf;
+
+ dpni_set_field(ext_params->flags, DROP_ENABLE, cfg->enable);
+ dpni_set_field(ext_params->flags, DROP_UNITS, cfg->units);
+ ext_params->green_drop_probability = cfg->green.drop_probability;
+ ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
+ ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
+ ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
+ ext_params->yellow_max_threshold =
+ cpu_to_le64(cfg->yellow.max_threshold);
+ ext_params->yellow_min_threshold =
+ cpu_to_le64(cfg->yellow.min_threshold);
+ ext_params->red_drop_probability = cfg->red.drop_probability;
+ ext_params->red_max_threshold = cpu_to_le64(cfg->red.max_threshold);
+ ext_params->red_min_threshold = cpu_to_le64(cfg->red.min_threshold);
+}
+
+/**
+ * dpni_extract_early_drop() - extract the early drop configuration.
+ * @cfg: Early-drop configuration
+ * @early_drop_buf: Zeroed 256-byte memory buffer, to be mapped for DMA
+ *
+ * This function has to be called after dpni_get_early_drop()
+ *
+ */
+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
+ const uint8_t *early_drop_buf)
+{
+ const struct dpni_early_drop *ext_params;
+
+ ext_params = (const struct dpni_early_drop *)early_drop_buf;
+
+ cfg->enable = dpni_get_field(ext_params->flags, DROP_ENABLE);
+ cfg->units = dpni_get_field(ext_params->flags, DROP_UNITS);
+ cfg->green.drop_probability = ext_params->green_drop_probability;
+ cfg->green.max_threshold = le64_to_cpu(ext_params->green_max_threshold);
+ cfg->green.min_threshold = le64_to_cpu(ext_params->green_min_threshold);
+ cfg->yellow.drop_probability = ext_params->yellow_drop_probability;
+ cfg->yellow.max_threshold =
+ le64_to_cpu(ext_params->yellow_max_threshold);
+ cfg->yellow.min_threshold =
+ le64_to_cpu(ext_params->yellow_min_threshold);
+ cfg->red.drop_probability = ext_params->red_drop_probability;
+ cfg->red.max_threshold = le64_to_cpu(ext_params->red_max_threshold);
+ cfg->red.min_threshold = le64_to_cpu(ext_params->red_min_threshold);
+}
+
+/**
+ * dpni_set_early_drop() - Set traffic class early-drop configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - only Rx and Tx types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
+ * with the early-drop configuration by calling dpni_prepare_early_drop()
+ *
+ * warning: Before calling this function, call dpni_prepare_early_drop() to
+ * prepare the early_drop_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_early_drop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ uint64_t early_drop_iova)
+{
+ struct dpni_cmd_early_drop *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_EARLY_DROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_early_drop *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+ cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_early_drop() - Get Rx traffic class early-drop configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - only Rx and Tx types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
+ *
+ * warning: After calling this function, call dpni_extract_early_drop() to
+ * get the early drop configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_early_drop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ uint64_t early_drop_iova)
+{
+ struct dpni_cmd_early_drop *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_EARLY_DROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_early_drop *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+ cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpni_set_congestion_notification() - Set traffic class congestion
* notification configuration
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2020 NXP
*
*/
#ifndef __FSL_DPNI_H
uint16_t token,
struct dpni_link_state *state);
+/**
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI TX shaping configuration
+ * @rate_limit: Rate in Mbps
+ * @max_burst_size: Burst size in bytes (up to 64KB)
+ */
+struct dpni_tx_shaping_cfg {
+ uint32_t rate_limit;
+ uint16_t max_burst_size;
+};
+
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+ const struct dpni_tx_shaping_cfg *tx_er_shaper,
+ int coupled);
+
int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint32_t cmd_flags,
uint16_t token);
+/**
+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
+ * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
+ * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
+ */
+enum dpni_tx_schedule_mode {
+ DPNI_TX_SCHED_STRICT_PRIORITY = 0,
+ DPNI_TX_SCHED_WEIGHTED_A,
+ DPNI_TX_SCHED_WEIGHTED_B,
+};
+
+/**
+ * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
+ * @mode: Scheduling mode
+ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
+ *	not applicable for 'strict-priority' mode
+ */
+struct dpni_tx_schedule_cfg {
+ enum dpni_tx_schedule_mode mode;
+ uint16_t delta_bandwidth;
+};
+
+/**
+ * struct dpni_tx_priorities_cfg - Structure representing transmission
+ * priorities for DPNI TCs
+ * @tc_sched: An array of traffic class scheduling configurations
+ * @prio_group_A: Priority of group A
+ * @prio_group_B: Priority of group B
+ * @separate_groups: Treat A and B groups as separate
+ */
+struct dpni_tx_priorities_cfg {
+ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
+ uint32_t prio_group_A;
+ uint32_t prio_group_B;
+ uint8_t separate_groups;
+};
+
+int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_tx_priorities_cfg *cfg);
+
/**
* enum dpni_dist_mode - DPNI distribution mode
* @DPNI_DIST_MODE_NONE: No distribution
uint8_t tc_id,
const struct dpni_rx_tc_dist_cfg *cfg);
+/**
+ * Set to select color aware mode (otherwise - color blind)
+ */
+#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001
+/**
+ * Set to discard frame with RED color
+ */
+#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002
+
+/**
+ * enum dpni_policer_mode - selecting the policer mode
+ * @DPNI_POLICER_MODE_NONE: Policer is disabled
+ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through
+ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698
+ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115
+ */
+enum dpni_policer_mode {
+ DPNI_POLICER_MODE_NONE = 0,
+ DPNI_POLICER_MODE_PASS_THROUGH,
+ DPNI_POLICER_MODE_RFC_2698,
+ DPNI_POLICER_MODE_RFC_4115
+};
+
+/**
+ * enum dpni_policer_unit - DPNI policer units
+ * @DPNI_POLICER_UNIT_BYTES: bytes units
+ * @DPNI_POLICER_UNIT_FRAMES: frames units
+ */
+enum dpni_policer_unit {
+ DPNI_POLICER_UNIT_BYTES = 0,
+ DPNI_POLICER_UNIT_FRAMES
+};
+
+/**
+ * enum dpni_policer_color - selecting the policer color
+ * @DPNI_POLICER_COLOR_GREEN: Green color
+ * @DPNI_POLICER_COLOR_YELLOW: Yellow color
+ * @DPNI_POLICER_COLOR_RED: Red color
+ */
+enum dpni_policer_color {
+ DPNI_POLICER_COLOR_GREEN = 0,
+ DPNI_POLICER_COLOR_YELLOW,
+ DPNI_POLICER_COLOR_RED
+};
+
+/**
+ * struct dpni_rx_tc_policing_cfg - Policer configuration
+ * @options: Mask of available options; use 'DPNI_POLICER_OPT_<X>' values
+ * @mode: policer mode
+ * @default_color: For pass-through mode, the policer re-colors any incoming
+ *	packet with this color. For color-aware non-pass-through mode, the
+ *	policer re-colors with this color all packets with FD[DROPP] > 2.
+ * @units: Bytes or Packets
+ * @cir: Committed information rate (CIR) in Kbps or packets/second
+ * @cbs: Committed burst size (CBS) in bytes or packets
+ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second
+ * Excess information rate (EIR, rfc4115) in Kbps or packets/second
+ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets
+ * Excess burst size (EBS, rfc4115) in bytes or packets
+ */
+struct dpni_rx_tc_policing_cfg {
+ uint32_t options;
+ enum dpni_policer_mode mode;
+ enum dpni_policer_unit units;
+ enum dpni_policer_color default_color;
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t eir;
+ uint32_t ebs;
+};
+
+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_policing_cfg *cfg);
+
+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ struct dpni_rx_tc_policing_cfg *cfg);
+
/**
* enum dpni_congestion_unit - DPNI congestion units
* @DPNI_CONGESTION_UNIT_BYTES: bytes units
DPNI_CONGESTION_UNIT_FRAMES
};
+/**
+ * enum dpni_early_drop_mode - DPNI early drop mode
+ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled
+ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
+ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode
+ */
+enum dpni_early_drop_mode {
+ DPNI_EARLY_DROP_MODE_NONE = 0,
+ DPNI_EARLY_DROP_MODE_TAIL,
+ DPNI_EARLY_DROP_MODE_WRED
+};
+
+/**
+ * struct dpni_wred_cfg - WRED configuration
+ * @max_threshold: maximum threshold up to which packets may be discarded
+ *	probabilistically; above this threshold all packets are discarded.
+ *	Must be less than 2^39; due to the HW implementation it is
+ *	approximated as (x+256)*2^(y-1).
+ * @min_threshold: minimum threshold at which packets may start being
+ *	discarded
+ * @drop_probability: probability that a packet will be discarded (1-100,
+ * associated with the max_threshold).
+ */
+struct dpni_wred_cfg {
+ uint64_t max_threshold;
+ uint64_t min_threshold;
+ uint8_t drop_probability;
+};
+
+/**
+ * struct dpni_early_drop_cfg - early-drop configuration
+ * @enable: drop enable
+ * @units: units type
+ * @green: WRED - 'green' configuration
+ * @yellow: WRED - 'yellow' configuration
+ * @red: WRED - 'red' configuration
+ */
+struct dpni_early_drop_cfg {
+ uint8_t enable;
+ enum dpni_congestion_unit units;
+ struct dpni_wred_cfg green;
+ struct dpni_wred_cfg yellow;
+ struct dpni_wred_cfg red;
+};
+
+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
+ uint8_t *early_drop_buf);
+
+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
+ const uint8_t *early_drop_buf);
+
+int dpni_set_early_drop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ uint64_t early_drop_iova);
+
+int dpni_get_early_drop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ uint64_t early_drop_iova);
+
/**
* enum dpni_dest - DPNI destination types
* @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2020 NXP
*
*/
#ifndef _FSL_DPNI_CMD_H
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V3(0x235)
+#define DPNI_CMDID_SET_RX_TC_POLICING DPNI_CMD(0x23E)
+
#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD_V2(0x240)
#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD_V2(0x241)
#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
+#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
+#define DPNI_CMDID_GET_RX_TC_POLICING DPNI_CMD(0x251)
+
#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V3(0x25D)
#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V2(0x25F)
uint64_t advertising;
};
+#define DPNI_COUPLED_SHIFT 0
+#define DPNI_COUPLED_SIZE 1
+
+struct dpni_cmd_set_tx_shaping {
+ uint16_t tx_cr_max_burst_size;
+ uint16_t tx_er_max_burst_size;
+ uint32_t pad;
+ uint32_t tx_cr_rate_limit;
+ uint32_t tx_er_rate_limit;
+ /* from LSB: coupled:1 */
+ uint8_t coupled;
+};
+
struct dpni_cmd_set_max_frame_length {
uint16_t max_frame_length;
};
uint8_t tc_id;
};
+#define DPNI_MODE_SHIFT 0
+#define DPNI_MODE_SIZE 4
+#define DPNI_COLOR_SHIFT 4
+#define DPNI_COLOR_SIZE 4
+#define DPNI_UNITS_SHIFT 0
+#define DPNI_UNITS_SIZE 4
+
+struct dpni_cmd_set_rx_tc_policing {
+ /* from LSB: mode:4 color:4 */
+ uint8_t mode_color;
+ /* from LSB: units: 4 */
+ uint8_t units;
+ uint8_t tc_id;
+ uint8_t pad;
+ uint32_t options;
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t eir;
+ uint32_t ebs;
+};
+
+struct dpni_cmd_get_rx_tc_policing {
+ uint16_t pad;
+ uint8_t tc_id;
+};
+
+struct dpni_rsp_get_rx_tc_policing {
+ /* from LSB: mode:4 color:4 */
+ uint8_t mode_color;
+ /* from LSB: units: 4 */
+ uint8_t units;
+ uint16_t pad;
+ uint32_t options;
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t eir;
+ uint32_t ebs;
+};
+
#define DPNI_DROP_ENABLE_SHIFT 0
#define DPNI_DROP_ENABLE_SIZE 1
#define DPNI_DROP_UNITS_SHIFT 2
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018-2021 NXP
if not is_linux
build = false
deps += ['mempool_dpaa2']
sources = files('base/dpaa2_hw_dpni.c',
+ 'dpaa2_tm.c',
'dpaa2_mux.c',
'dpaa2_ethdev.c',
'dpaa2_flow.c',