(shaper->mantissa << 1);
}
-static int
-nix_get_link(struct otx2_eth_dev *dev)
+int
+otx2_nix_get_link(struct otx2_eth_dev *dev)
{
int link = 13 /* SDP */;
uint16_t lmac_chan;
&pir->burst_mantissa);
}
+static void
+shaper_default_red_algo(struct otx2_eth_dev *dev,
+ struct otx2_nix_tm_node *tm_node,
+ struct otx2_nix_tm_shaper_profile *profile)
+{
+ struct shaper_params cir, pir;
+
+ /* C0 doesn't support STALL when both PIR & CIR are enabled */
+ if (profile && otx2_dev_is_96xx_Cx(dev)) {
+ memset(&cir, 0, sizeof(cir));
+ memset(&pir, 0, sizeof(pir));
+ shaper_config_to_nix(profile, &cir, &pir);
+
+ if (pir.rate && cir.rate) {
+ tm_node->red_algo = NIX_REDALG_DISCARD;
+ tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
+ return;
+ }
+ }
+
+ tm_node->red_algo = NIX_REDALG_STD;
+ tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
+}
+
static int
populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
{
switch (hw_lvl) {
case NIX_TXSCH_LVL_SMQ:
- /* Set xoff which will be cleared later */
+ /* Set xoff, which will be cleared later, and the minimum
+ * frame length used for zero padding when the packet is
+ * shorter
+ */
reg[k] = NIX_AF_SMQX_CFG(schq);
- regval[k] = BIT_ULL(50);
- regval_mask[k] = ~BIT_ULL(50);
+ regval[k] = BIT_ULL(50) | NIX_MIN_HW_FRS;
+ regval_mask[k] = ~(BIT_ULL(50) | 0x7f);
k++;
/* Parent and schedule conf */
if (!otx2_dev_is_sdp(dev) &&
dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
- nix_get_link(dev));
+ otx2_nix_get_link(dev));
regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
k++;
}
if (!otx2_dev_is_sdp(dev) &&
dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
- nix_get_link(dev));
+ otx2_nix_get_link(dev));
regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
k++;
}
{
struct otx2_nix_tm_shaper_profile *profile;
struct otx2_nix_tm_node *tm_node, *parent_node;
- struct shaper_params cir, pir;
uint32_t profile_id;
profile_id = params->shaper_profile_id;
if (profile)
profile->reference_count++;
- memset(&cir, 0, sizeof(cir));
- memset(&pir, 0, sizeof(pir));
- shaper_config_to_nix(profile, &cir, &pir);
-
tm_node->parent = parent_node;
tm_node->parent_hw_id = UINT32_MAX;
- /* C0 doesn't support STALL when both PIR & CIR are enabled */
- if (lvl < OTX2_TM_LVL_QUEUE &&
- otx2_dev_is_96xx_Cx(dev) &&
- pir.rate && cir.rate)
- tm_node->red_algo = NIX_REDALG_DISCARD;
- else
- tm_node->red_algo = NIX_REDALG_STD;
+ shaper_default_red_algo(dev, tm_node, profile);
TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
return 0;
exit:
+ otx2_nix_tm_dump(dev);
return -EFAULT;
}
*is_leaf = true;
else
*is_leaf = false;
+ return 0;
+}
+
+static int
+otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc, max_nr_nodes = 0, i;
+ struct free_rsrcs_rsp *rsp;
+
+ memset(cap, 0, sizeof(*cap));
+
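+ /* Query the free scheduler queue count per level from the AF */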
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+ max_nr_nodes += rsp->schq[i];
+
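+ /* Non-leaf nodes are bounded by free HW schedulers, leaves by Tx queues */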
+ cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
+ /* TL1 level is reserved for PF */
+ cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
+ OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+
+ /* Shaper Capabilities */
+ cap->shaper_private_n_max = max_nr_nodes;
+ cap->shaper_n_max = max_nr_nodes;
+ cap->shaper_private_dual_rate_n_max = max_nr_nodes;
+ cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+ cap->shaper_pkt_length_adjust_min = 0;
+ cap->shaper_pkt_length_adjust_max = 0;
+
+ /* Schedule Capabilities */
+ cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
+ cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+ cap->dynamic_update_mask =
+ RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
+ RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
+ cap->stats_mask =
+ RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES |
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
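+ /* Packet marking (VLAN DEI, IP ECN, IP DSCP) is not supported */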
+ for (i = 0; i < RTE_COLORS; i++) {
+ cap->mark_vlan_dei_supported[i] = false;
+ cap->mark_ip_ecn_tcp_supported[i] = false;
+ cap->mark_ip_dscp_supported[i] = false;
+ }
return 0;
}
+
+static int
+otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct free_rsrcs_rsp *rsp;
+ uint16_t hw_lvl;
+ int rc;
+
+ memset(cap, 0, sizeof(*cap));
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
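+ /* Translate the rte_tm level to the NIX HW scheduling level */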
+ hw_lvl = nix_tm_lvl2nix(dev, lvl);
+
+ if (nix_tm_is_leaf(dev, lvl)) {
+ /* Leaf */
+ cap->n_nodes_max = dev->tm_leaf_cnt;
+ cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
+ cap->leaf_nodes_identical = 1;
+ cap->leaf.stats_mask =
+ RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+
+ } else if (lvl == OTX2_TM_LVL_ROOT) {
+ /* Root node, aka TL2(vf)/TL1(pf) */
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported =
+ nix_tm_have_tl1_access(dev) ? false : true;
+ cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+ cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+ if (nix_tm_have_tl1_access(dev))
+ cap->nonleaf.stats_mask =
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ } else if ((lvl < OTX2_TM_LVL_MAX) &&
+ (hw_lvl < NIX_TXSCH_LVL_CNT)) {
+ /* TL2, TL3, TL4, MDQ */
+ cap->n_nodes_max = rsp->schq[hw_lvl];
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = true;
+ cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+ /* MDQ doesn't support Strict Priority */
+ if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+ cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+ else
+ cap->nonleaf.sched_n_children_max =
+ rsp->schq[hw_lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+ } else {
+ /* unsupported level */
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_nix_tm_node *tm_node;
+ struct free_rsrcs_rsp *rsp;
+ int rc, hw_lvl, lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ hw_lvl = tm_node->hw_lvl;
+ lvl = tm_node->lvl;
+
+ /* Leaf node */
+ if (nix_tm_is_leaf(dev, lvl)) {
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+ return 0;
+ }
+
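+ /* Non-leaf limits depend on the free HW schedulers reported by the AF */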
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ /* Non Leaf Shaper */
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported =
+ (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
+ cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+ /* Non Leaf Scheduler */
+ if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+ cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+ else
+ cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+
+ cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+ if (hw_lvl == NIX_TXSCH_LVL_TL1)
+ cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ return 0;
+}
+
static int
otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
uint32_t profile_id,
}
/* Delete default/ratelimit tree */
- if (dev->tm_flags & (NIX_TM_DEFAULT_TREE)) {
+ if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
if (rc) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "failed to free default resources";
return rc;
}
- dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE);
+ dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
+ NIX_TM_RATE_LIMIT_TREE);
}
/* Free up user alloc'ed resources */
if (rc)
return rc;
+ shaper_default_red_algo(dev, tm_node, profile);
+
/* Update the PIR/CIR and clear SW XOFF */
req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
req->lvl = tm_node->hw_lvl;
const struct rte_tm_ops otx2_tm_ops = {
.node_type_get = otx2_nix_tm_node_type_get,
+ .capabilities_get = otx2_nix_tm_capa_get,
+ .level_capabilities_get = otx2_nix_tm_level_capa_get,
+ .node_capabilities_get = otx2_nix_tm_node_capa_get,
+
.shaper_profile_add = otx2_nix_tm_shaper_profile_add,
.shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
return 0;
}
+
+static int
+nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint32_t def = eth_dev->data->nb_tx_queues;
+ struct rte_tm_node_params params;
+ uint32_t leaf_parent, i, rc = 0;
+
+ memset(&params, 0, sizeof(params));
+
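+ /* With TL1 access (PF): TL1->TL2->TL3->TL4 chain, then one
+ * SMQ and one leaf node per Tx queue
+ */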
+ if (nix_tm_have_tl1_access(dev)) {
+ dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
+ rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL1,
+ OTX2_TM_LVL_ROOT, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL2,
+ OTX2_TM_LVL_SCH1, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL3,
+ OTX2_TM_LVL_SCH2, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL4,
+ OTX2_TM_LVL_SCH3, false, &params);
+ if (rc)
+ goto error;
+ leaf_parent = def + 3;
+
+ /* Add per queue SMQ nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+ leaf_parent,
+ 0, DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_SMQ,
+ OTX2_TM_LVL_SCH4,
+ false, &params);
+ if (rc)
+ goto error;
+ }
+
+ /* Add leaf nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, i,
+ leaf_parent + 1 + i, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_CNT,
+ OTX2_TM_LVL_QUEUE,
+ false, &params);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+ }
+
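+ /* Without TL1 access (VF): root at TL2, then TL3->TL4, and one
+ * SMQ and one leaf node per Tx queue
+ */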
+ dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
+ rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+ DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
+ OTX2_TM_LVL_ROOT, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+ DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
+ OTX2_TM_LVL_SCH1, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+ DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
+ OTX2_TM_LVL_SCH2, false, &params);
+ if (rc)
+ goto error;
+ leaf_parent = def + 2;
+
+ /* Add per queue SMQ nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+ leaf_parent,
+ 0, DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_SMQ,
+ OTX2_TM_LVL_SCH3,
+ false, &params);
+ if (rc)
+ goto error;
+ }
+
+ /* Add leaf nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_CNT,
+ OTX2_TM_LVL_SCH4,
+ false, &params);
+ if (rc)
+ break;
+ }
+error:
+ return rc;
+}
+
+static int
+otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
+ struct otx2_nix_tm_node *tm_node,
+ uint64_t tx_rate)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_shaper_profile profile;
+ struct otx2_mbox *mbox = dev->mbox;
+ volatile uint64_t *reg, *regval;
+ struct nix_txschq_config *req;
+ uint16_t flags;
+ uint8_t k = 0;
+ int rc;
+
+ flags = tm_node->flags;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = NIX_TXSCH_LVL_MDQ;
+ reg = req->reg;
+ regval = req->regval;
+
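+ /* A rate of 0 disables the queue by asserting SW XOFF on the MDQ */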
+ if (tx_rate == 0) {
+ k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
+ flags &= ~NIX_TM_NODE_ENABLED;
+ goto exit;
+ }
+
+ if (!(flags & NIX_TM_NODE_ENABLED)) {
+ k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
+ flags |= NIX_TM_NODE_ENABLED;
+ }
+
+ /* Use only PIR for rate limit */
+ memset(&profile, 0, sizeof(profile));
+ profile.params.peak.rate = tx_rate;
+ /* Minimum burst of ~4us worth of Tx bytes */
+ profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
+ (4ull * tx_rate) / (1E6 * 8));
+ if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
+ dev->tm_rate_min = tx_rate;
+
+ k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
+exit:
+ req->num_regs = k;
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ tm_node->flags = flags;
+ return 0;
+}
+
+int
+otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate_mbps)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
+ struct otx2_nix_tm_node *tm_node;
+ int rc;
+
+ /* Check for supported revisions */
+ if (otx2_dev_is_95xx_Ax(dev) ||
+ otx2_dev_is_96xx_Ax(dev))
+ return -EINVAL;
+
+ if (queue_idx >= eth_dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+ !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
+ goto error;
+
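+ /* The default tree funnels all queues through a common scheduler
+ * chain; switch to the rate-limit tree so each queue gets its own
+ * SMQ/MDQ to shape.
+ */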
+ if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+ eth_dev->data->nb_tx_queues > 1) {
+ /* The ethdev must be stopped before the TM topology can change */
+ if (eth_dev->data->dev_started)
+ return -EBUSY;
+
+ /*
+ * Disable xmit; it will be re-enabled once the
+ * new topology is in place.
+ */
+ rc = nix_xmit_disable(eth_dev);
+ if (rc) {
+ otx2_err("failed to disable TX, rc=%d", rc);
+ return -EIO;
+ }
+
+ rc = nix_tm_free_resources(dev, 0, 0, false);
+ if (rc < 0) {
+ otx2_tm_dbg("failed to free default resources, rc %d",
+ rc);
+ return -EIO;
+ }
+
+ rc = nix_tm_prepare_rate_limited_tree(eth_dev);
+ if (rc < 0) {
+ otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
+ return rc;
+ }
+
+ rc = nix_tm_alloc_resources(eth_dev, true);
+ if (rc != 0) {
+ otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
+ return rc;
+ }
+
+ dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
+ dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
+ }
+
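+ /* The rate limit is applied to the MDQ parent of the queue's leaf node */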
+ tm_node = nix_tm_node_search(dev, queue_idx, false);
+
+ /* Verify that we found a valid leaf node with a configured parent */
+ if (!tm_node ||
+ !nix_tm_is_leaf(dev, tm_node->lvl) ||
+ !tm_node->parent ||
+ tm_node->parent->hw_id == UINT32_MAX)
+ return -EIO;
+
+ return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
+error:
+ otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
+ return -EINVAL;
+}
+
+int
+otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ if (!arg)
+ return -EINVAL;
+
+ /* Check for supported revisions */
+ if (otx2_dev_is_95xx_Ax(dev) ||
+ otx2_dev_is_96xx_Ax(dev))
+ return -EINVAL;
+
+ *(const void **)arg = &otx2_tm_ops;
+
+ return 0;
+}
+
int
otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
{