ethdev: add device flag to bypass auto-filled queue xstats
diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index 33f912e..fdd5669 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -237,6 +237,30 @@ shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
                                                 &pir->burst_mantissa);
 }
 
+static void
+shaper_default_red_algo(struct otx2_eth_dev *dev,
+                       struct otx2_nix_tm_node *tm_node,
+                       struct otx2_nix_tm_shaper_profile *profile)
+{
+       struct shaper_params cir, pir;
+
+       /* 96xx C0 doesn't support STALL when both PIR & CIR are enabled */
+       if (profile && otx2_dev_is_96xx_Cx(dev)) {
+               memset(&cir, 0, sizeof(cir));
+               memset(&pir, 0, sizeof(pir));
+               shaper_config_to_nix(profile, &cir, &pir);
+
+               if (pir.rate && cir.rate) {
+                       tm_node->red_algo = NIX_REDALG_DISCARD;
+                       tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
+                       return;
+               }
+       }
+
+       tm_node->red_algo = NIX_REDALG_STD;
+       tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
+}
+
 static int
 populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
 {
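
The helper centralizes the 96xx C0 workaround that previously lived inline in
nix_tm_node_add_to_list() (see the hunk further down). A sketch of its outcome
for the two interesting cases, with an illustrative dual-rate profile; the
field values are assumptions for the example, not driver defaults:

        /* dual-rate profile (CIR and PIR both enabled) on 96xx C0:
         * STALL is unusable, so the node falls back to DISCARD
         */
        profile->params.committed.rate = 100000000; /* CIR, bits/sec */
        profile->params.peak.rate = 200000000;      /* PIR, bits/sec */
        shaper_default_red_algo(dev, tm_node, profile);
        /* tm_node->red_algo == NIX_REDALG_DISCARD */

        /* single-rate or no profile: standard RED algorithm restored */
        shaper_default_red_algo(dev, tm_node, NULL);
        /* tm_node->red_algo == NIX_REDALG_STD */
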
@@ -336,18 +360,25 @@ prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
 {
        struct shaper_params cir, pir;
        uint32_t schq = tm_node->hw_id;
+       uint64_t adjust = 0;
        uint8_t k = 0;
 
        memset(&cir, 0, sizeof(cir));
        memset(&pir, 0, sizeof(pir));
        shaper_config_to_nix(profile, &cir, &pir);
 
-       otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
-                   "pir %" PRIu64 "(%" PRIu64 "B),"
-                    " cir %" PRIu64 "(%" PRIu64 "B) (%p)",
-                    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
-                    tm_node->id, pir.rate, pir.burst,
-                    cir.rate, cir.burst, tm_node);
+       /* Packet length adjust */
+       if (tm_node->pkt_mode)
+               adjust = 1;
+       else if (profile)
+               adjust = profile->params.pkt_length_adjust & 0x1FF;
+
+       otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, pir %" PRIu64
+                   "(%" PRIu64 "B), cir %" PRIu64 "(%" PRIu64 "B), "
+                   "adjust 0x%" PRIx64 " (pktmode %u) (%p)",
+                   nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
+                   tm_node->id, pir.rate, pir.burst, cir.rate, cir.burst,
+                   adjust, tm_node->pkt_mode, tm_node);
 
        switch (tm_node->hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:
@@ -364,7 +395,9 @@ prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
 
                /* Configure RED ALG */
                reg[k] = NIX_AF_MDQX_SHAPE(schq);
-               regval[k] = ((uint64_t)tm_node->red_algo << 9);
+               regval[k] = (adjust |
+                            (uint64_t)tm_node->red_algo << 9 |
+                            (uint64_t)tm_node->pkt_mode << 24);
                k++;
                break;
        case NIX_TXSCH_LVL_TL4:
@@ -381,7 +414,9 @@ prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
 
                /* Configure RED algo */
                reg[k] = NIX_AF_TL4X_SHAPE(schq);
-               regval[k] = ((uint64_t)tm_node->red_algo << 9);
+               regval[k] = (adjust |
+                            (uint64_t)tm_node->red_algo << 9 |
+                            (uint64_t)tm_node->pkt_mode << 24);
                k++;
                break;
        case NIX_TXSCH_LVL_TL3:
@@ -398,7 +433,9 @@ prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
 
                /* Configure RED algo */
                reg[k] = NIX_AF_TL3X_SHAPE(schq);
-               regval[k] = ((uint64_t)tm_node->red_algo << 9);
+               regval[k] = (adjust |
+                            (uint64_t)tm_node->red_algo << 9 |
+                            (uint64_t)tm_node->pkt_mode << 24);
                k++;
 
                break;
@@ -416,7 +453,9 @@ prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
 
                /* Configure RED algo */
                reg[k] = NIX_AF_TL2X_SHAPE(schq);
-               regval[k] = ((uint64_t)tm_node->red_algo << 9);
+               regval[k] = (adjust |
+                            (uint64_t)tm_node->red_algo << 9 |
+                            (uint64_t)tm_node->pkt_mode << 24);
                k++;
 
                break;
@@ -426,6 +465,12 @@ prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;
+
+               /* Configure length disable and adjust */
+               reg[k] = NIX_AF_TL1X_SHAPE(schq);
+               regval[k] = (adjust |
+                            (uint64_t)tm_node->pkt_mode << 24);
+               k++;
                break;
        }
 
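Every level above assembles the same NIX_AF_*_SHAPE value: length adjust in
bits [8:0], the RED algorithm select starting at bit 9, and the
packet-mode/length-disable control at bit 24. A minimal sketch of that
encoding, assuming exactly the bit positions used by the added lines (the
helper name is illustrative, not driver code):

        #include <stdint.h>

        static inline uint64_t
        nix_shape_regval_sketch(uint64_t adjust, uint64_t red_algo,
                                uint64_t pkt_mode)
        {
                /* adjust is pre-masked to 9 bits by the caller */
                return adjust | (red_algo << 9) | (pkt_mode << 24);
        }

TL1 is the exception: it has no RED algorithm field, so only the adjust and
packet-mode bits are written there.
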
@@ -531,10 +576,14 @@ populate_tm_reg(struct otx2_eth_dev *dev,
        switch (hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:
 
-               /* Set xoff which will be cleared later */
+               /* Set xoff, which is cleared later, and the minimum
+                * frame length used for zero padding when the packet
+                * is shorter
+                */
                reg[k] = NIX_AF_SMQX_CFG(schq);
-               regval[k] = BIT_ULL(50);
-               regval_mask[k] = ~BIT_ULL(50);
+               regval[k] = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) |
+                       NIX_MIN_HW_FRS;
+               regval_mask[k] = ~(BIT_ULL(50) | (0x7ULL << 36) | 0x7f);
                k++;
 
                /* Parent and schedule conf */
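
The regval/regval_mask pair above encodes a read-modify-write: bits cleared
in the mask are owned by this update, everything else in the CSR is
preserved. A sketch of the semantics assumed here:

        #include <stdint.h>

        static inline uint64_t
        apply_reg_update_sketch(uint64_t old, uint64_t val, uint64_t mask)
        {
                /* keep bits outside the updated fields, then set them */
                return (old & mask) | val;
        }

For the SMQ case this sets SW XOFF (bit 50), the max VTAG insertion count
(bits [38:36]) and the minimum frame length (bits [6:0]) used for zero
padding, leaving the rest of NIX_AF_SMQX_CFG untouched.
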
@@ -744,7 +793,6 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
 {
        struct otx2_nix_tm_shaper_profile *profile;
        struct otx2_nix_tm_node *tm_node, *parent_node;
-       struct shaper_params cir, pir;
        uint32_t profile_id;
 
        profile_id = params->shaper_profile_id;
@@ -773,24 +821,23 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
        tm_node->flags = 0;
        if (user)
                tm_node->flags = NIX_TM_NODE_USER;
+
+       /* Packet mode */
+       if (!nix_tm_is_leaf(dev, lvl) &&
+           ((profile && profile->params.packet_mode) ||
+            (params->nonleaf.wfq_weight_mode &&
+             params->nonleaf.n_sp_priorities &&
+             !params->nonleaf.wfq_weight_mode[0])))
+               tm_node->pkt_mode = 1;
+
        rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
 
        if (profile)
                profile->reference_count++;
 
-       memset(&cir, 0, sizeof(cir));
-       memset(&pir, 0, sizeof(pir));
-       shaper_config_to_nix(profile, &cir, &pir);
-
        tm_node->parent = parent_node;
        tm_node->parent_hw_id = UINT32_MAX;
-       /* C0 doesn't support STALL when both PIR & CIR are enabled */
-       if (lvl < OTX2_TM_LVL_QUEUE &&
-           otx2_dev_is_96xx_Cx(dev) &&
-           pir.rate && cir.rate)
-               tm_node->red_algo = NIX_REDALG_DISCARD;
-       else
-               tm_node->red_algo = NIX_REDALG_STD;
+       shaper_default_red_algo(dev, tm_node, profile);
 
        TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
 
@@ -1834,7 +1881,233 @@ otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
                *is_leaf = true;
        else
                *is_leaf = false;
+       return 0;
+}
+
+static int
+otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+                    struct rte_tm_capabilities *cap,
+                    struct rte_tm_error *error)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_mbox *mbox = dev->mbox;
+       int rc, max_nr_nodes = 0, i;
+       struct free_rsrcs_rsp *rsp;
+
+       memset(cap, 0, sizeof(*cap));
+
+       otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+       rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+       if (rc) {
+               error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+               error->message = "unexpected fatal error";
+               return rc;
+       }
+
+       for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+               max_nr_nodes += rsp->schq[i];
+
+       cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
+       /* TL1 level is reserved for PF */
+       cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
+                               OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
+       cap->non_leaf_nodes_identical = 1;
+       cap->leaf_nodes_identical = 1;
+
+       /* Shaper Capabilities */
+       cap->shaper_private_n_max = max_nr_nodes;
+       cap->shaper_n_max = max_nr_nodes;
+       cap->shaper_private_dual_rate_n_max = max_nr_nodes;
+       cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+       cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+       cap->shaper_private_packet_mode_supported = 1;
+       cap->shaper_private_byte_mode_supported = 1;
+       cap->shaper_pkt_length_adjust_min = NIX_LENGTH_ADJUST_MIN;
+       cap->shaper_pkt_length_adjust_max = NIX_LENGTH_ADJUST_MAX;
+
+       /* Schedule Capabilities */
+       cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
+       cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
+       cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+       cap->sched_wfq_n_groups_max = 1;
+       cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+       cap->sched_wfq_packet_mode_supported = 1;
+       cap->sched_wfq_byte_mode_supported = 1;
+
+       cap->dynamic_update_mask =
+               RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
+               RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
+       cap->stats_mask =
+               RTE_TM_STATS_N_PKTS |
+               RTE_TM_STATS_N_BYTES |
+               RTE_TM_STATS_N_PKTS_RED_DROPPED |
+               RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+       for (i = 0; i < RTE_COLORS; i++) {
+               cap->mark_vlan_dei_supported[i] = false;
+               cap->mark_ip_ecn_tcp_supported[i] = false;
+               cap->mark_ip_dscp_supported[i] = false;
+       }
+
+       return 0;
+}
+
+static int
+otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
+                           struct rte_tm_level_capabilities *cap,
+                           struct rte_tm_error *error)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_mbox *mbox = dev->mbox;
+       struct free_rsrcs_rsp *rsp;
+       uint16_t hw_lvl;
+       int rc;
+
+       memset(cap, 0, sizeof(*cap));
+
+       otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+       rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+       if (rc) {
+               error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+               error->message = "unexpected fatal error";
+               return rc;
+       }
+
+       hw_lvl = nix_tm_lvl2nix(dev, lvl);
+
+       if (nix_tm_is_leaf(dev, lvl)) {
+               /* Leaf */
+               cap->n_nodes_max = dev->tm_leaf_cnt;
+               cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
+               cap->leaf_nodes_identical = 1;
+               cap->leaf.stats_mask =
+                       RTE_TM_STATS_N_PKTS |
+                       RTE_TM_STATS_N_BYTES;
+
+       } else if (lvl == OTX2_TM_LVL_ROOT) {
+               /* Root node, aka TL2(vf)/TL1(pf) */
+               cap->n_nodes_max = 1;
+               cap->n_nodes_nonleaf_max = 1;
+               cap->non_leaf_nodes_identical = 1;
+
+               cap->nonleaf.shaper_private_supported = true;
+               cap->nonleaf.shaper_private_dual_rate_supported =
+                       nix_tm_have_tl1_access(dev) ? false : true;
+               cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+               cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+               cap->nonleaf.shaper_private_packet_mode_supported = 1;
+               cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+               cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+               cap->nonleaf.sched_sp_n_priorities_max =
+                                       nix_max_prio(dev, hw_lvl) + 1;
+               cap->nonleaf.sched_wfq_n_groups_max = 1;
+               cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+               cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+               cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+               if (nix_tm_have_tl1_access(dev))
+                       cap->nonleaf.stats_mask =
+                               RTE_TM_STATS_N_PKTS_RED_DROPPED |
+                               RTE_TM_STATS_N_BYTES_RED_DROPPED;
+       } else if ((lvl < OTX2_TM_LVL_MAX) &&
+                  (hw_lvl < NIX_TXSCH_LVL_CNT)) {
+               /* TL2, TL3, TL4, MDQ */
+               cap->n_nodes_max = rsp->schq[hw_lvl];
+               cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+               cap->non_leaf_nodes_identical = 1;
+
+               cap->nonleaf.shaper_private_supported = true;
+               cap->nonleaf.shaper_private_dual_rate_supported = true;
+               cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+               cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+               cap->nonleaf.shaper_private_packet_mode_supported = 1;
+               cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+               /* MDQ doesn't support Strict Priority */
+               if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+                       cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+               else
+                       cap->nonleaf.sched_n_children_max =
+                               rsp->schq[hw_lvl - 1];
+               cap->nonleaf.sched_sp_n_priorities_max =
+                       nix_max_prio(dev, hw_lvl) + 1;
+               cap->nonleaf.sched_wfq_n_groups_max = 1;
+               cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+               cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+               cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+       } else {
+               /* unsupported level */
+               error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int
+otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+                         struct rte_tm_node_capabilities *cap,
+                         struct rte_tm_error *error)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_mbox *mbox = dev->mbox;
+       struct otx2_nix_tm_node *tm_node;
+       struct free_rsrcs_rsp *rsp;
+       int rc, hw_lvl, lvl;
 
+       memset(cap, 0, sizeof(*cap));
+
+       tm_node = nix_tm_node_search(dev, node_id, true);
+       if (!tm_node) {
+               error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+               error->message = "no such node";
+               return -EINVAL;
+       }
+
+       hw_lvl = tm_node->hw_lvl;
+       lvl = tm_node->lvl;
+
+       /* Leaf node */
+       if (nix_tm_is_leaf(dev, lvl)) {
+               cap->stats_mask = RTE_TM_STATS_N_PKTS |
+                                       RTE_TM_STATS_N_BYTES;
+               return 0;
+       }
+
+       otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+       rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+       if (rc) {
+               error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+               error->message = "unexpected fatal error";
+               return rc;
+       }
+
+       /* Non Leaf Shaper */
+       cap->shaper_private_supported = true;
+       cap->shaper_private_dual_rate_supported =
+               (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
+       cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+       cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+       cap->shaper_private_packet_mode_supported = 1;
+       cap->shaper_private_byte_mode_supported = 1;
+
+       /* Non Leaf Scheduler */
+       if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+               cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+       else
+               cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+
+       cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
+       cap->nonleaf.sched_wfq_n_children_per_group_max =
+               cap->nonleaf.sched_n_children_max;
+       cap->nonleaf.sched_wfq_n_groups_max = 1;
+       cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+       cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+       cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+       if (hw_lvl == NIX_TXSCH_LVL_TL1)
+               cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+                       RTE_TM_STATS_N_BYTES_RED_DROPPED;
        return 0;
 }
 
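Once wired into otx2_tm_ops (see the ops hunk below), the three callbacks
above are reachable through the generic rte_tm API. A minimal
application-side sketch, assuming port_id is a configured octeontx2 port:

        #include <stdio.h>
        #include <rte_tm.h>

        static int
        show_tm_caps(uint16_t port_id)
        {
                struct rte_tm_capabilities cap;
                struct rte_tm_error err;
                int rc;

                rc = rte_tm_capabilities_get(port_id, &cap, &err);
                if (rc)
                        return rc; /* e.g. unsupported silicon revision */

                printf("max nodes %u, max levels %u, pkt-mode shaping %s\n",
                       cap.n_nodes_max, cap.n_levels_max,
                       cap.shaper_private_packet_mode_supported ?
                       "yes" : "no");
                return 0;
        }

rte_tm_level_capabilities_get() and rte_tm_node_capabilities_get() follow the
same pattern for the other two callbacks.
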
@@ -1886,6 +2159,13 @@ otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
                }
        }
 
+       if (params->pkt_length_adjust < NIX_LENGTH_ADJUST_MIN ||
+           params->pkt_length_adjust > NIX_LENGTH_ADJUST_MAX) {
+               error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+               error->message = "length adjust invalid";
+               return -EINVAL;
+       }
+
        profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
                              sizeof(struct otx2_nix_tm_shaper_profile), 0);
        if (!profile)
@@ -1898,13 +2178,14 @@ otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
 
        otx2_tm_dbg("Added TM shaper profile %u, "
                    " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
-                   ", cbs %" PRIu64 " , adj %u",
+                   ", cbs %" PRIu64 " , adj %u, pkt mode %d",
                    profile_id,
                    params->peak.rate * 8,
                    params->peak.size,
                    params->committed.rate * 8,
                    params->committed.size,
-                   params->pkt_length_adjust);
+                   params->pkt_length_adjust,
+                   params->packet_mode);
 
        /* Translate rate as bits per second */
        profile->params.peak.rate = profile->params.peak.rate * 8;
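
From the application side, the new pkt_length_adjust bounds check and the
packet_mode field are exercised as below. Ids, rates and the adjust value are
illustrative; per the rte_tm API, packet_mode == 1 means rates and bucket
sizes are counted in packets instead of bytes:

        #include <rte_tm.h>

        static int
        add_pkt_mode_profile(uint16_t port_id, uint32_t profile_id)
        {
                struct rte_tm_shaper_profile_params prof = {
                        .committed = { .rate = 10000, .size = 32 },
                        .peak = { .rate = 20000, .size = 32 },
                        /* must lie within NIX_LENGTH_ADJUST_{MIN,MAX} */
                        .pkt_length_adjust = 24,
                        .packet_mode = 1,
                };
                struct rte_tm_error err;

                return rte_tm_shaper_profile_add(port_id, profile_id,
                                                 &prof, &err);
        }
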
@@ -1960,9 +2241,11 @@ otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
                     struct rte_tm_error *error)
 {
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_nix_tm_shaper_profile *profile = NULL;
        struct otx2_nix_tm_node *parent_node;
-       int rc, clear_on_fail = 0;
-       uint32_t exp_next_lvl;
+       int rc, pkt_mode, clear_on_fail = 0;
+       uint32_t exp_next_lvl, i;
+       uint32_t profile_id;
        uint16_t hw_lvl;
 
        /* we don't support dynamic updates */
@@ -2024,13 +2307,45 @@ otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
                return -EINVAL;
        }
 
-       /* Check if shaper profile exists for non leaf node */
-       if (!nix_tm_is_leaf(dev, lvl) &&
-           params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
-           !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) {
-               error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
-               error->message = "invalid shaper profile";
-               return -EINVAL;
+       if (!nix_tm_is_leaf(dev, lvl)) {
+               /* Check if shaper profile exists for non leaf node */
+               profile_id = params->shaper_profile_id;
+               profile = nix_tm_shaper_profile_search(dev, profile_id);
+               if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && !profile) {
+                       error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+                       error->message = "invalid shaper profile";
+                       return -EINVAL;
+               }
+
+               /* Minimum static priority count is 1 */
+               if (!params->nonleaf.n_sp_priorities ||
+                   params->nonleaf.n_sp_priorities > TXSCH_TLX_SP_PRIO_MAX) {
+                       error->type =
+                               RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+                       error->message = "invalid sp priorities";
+                       return -EINVAL;
+               }
+
+               pkt_mode = 0;
+               /* Validate weight mode */
+               for (i = 0; i < params->nonleaf.n_sp_priorities &&
+                    params->nonleaf.wfq_weight_mode; i++) {
+                       pkt_mode = !params->nonleaf.wfq_weight_mode[i];
+                       if (pkt_mode == !params->nonleaf.wfq_weight_mode[0])
+                               continue;
+
+                       error->type =
+                               RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+                       error->message = "unsupported weight mode";
+                       return -EINVAL;
+               }
+
+               if (profile && params->nonleaf.n_sp_priorities &&
+                   pkt_mode != profile->params.packet_mode) {
+                       error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+                       error->message = "shaper wfq packet mode mismatch";
+                       return -EINVAL;
+               }
        }
 
        /* Check if there is second DWRR already in siblings or holes in prio */
@@ -2272,6 +2587,12 @@ otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
                }
        }
 
+       if (profile && profile->params.packet_mode != tm_node->pkt_mode) {
+               error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+               error->message = "shaper profile pkt mode mismatch";
+               return -EINVAL;
+       }
+
        tm_node->params.shaper_profile_id = profile_id;
 
        /* Nothing to do if not yet committed */
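
The same consistency is enforced when a committed node is moved to a
different profile: a packet-mode mismatch fails with
RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID before any hardware state is touched.
Sketch of the guarded call (ids illustrative):

        /* rejected with -EINVAL if profile 2's packet_mode differs
         * from the node's current mode
         */
        rc = rte_tm_node_shaper_update(port_id, node_id, 2, &err);
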
@@ -2290,6 +2611,8 @@ otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
        if (rc)
                return rc;
 
+       shaper_default_red_algo(dev, tm_node, profile);
+
        /* Update the PIR/CIR and clear SW XOFF */
        req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = tm_node->hw_lvl;
@@ -2515,6 +2838,10 @@ exit:
 const struct rte_tm_ops otx2_tm_ops = {
        .node_type_get = otx2_nix_tm_node_type_get,
 
+       .capabilities_get = otx2_nix_tm_capa_get,
+       .level_capabilities_get = otx2_nix_tm_level_capa_get,
+       .node_capabilities_get = otx2_nix_tm_node_capa_get,
+
        .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
        .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
 
@@ -2910,6 +3237,24 @@ error:
        return -EINVAL;
 }
 
+int
+otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+       if (!arg)
+               return -EINVAL;
+
+       /* Check for supported revisions */
+       if (otx2_dev_is_95xx_Ax(dev) ||
+           otx2_dev_is_96xx_Ax(dev))
+               return -EINVAL;
+
+       *(const void **)arg = &otx2_tm_ops;
+
+       return 0;
+}
+
 int
 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
 {
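
For the rte_tm calls in the sketches above to reach otx2_tm_ops,
otx2_nix_tm_ops_get has to be hooked into the driver's eth_dev_ops; that
lives in otx2_ethdev.c, outside this file, so the hookup below is an assumed
sketch rather than part of this diff:

        /* otx2_ethdev.c (assumed location) */
        static const struct eth_dev_ops otx2_eth_dev_ops = {
                /* ... existing callbacks ... */
                .tm_ops_get = otx2_nix_tm_ops_get,
        };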