+ uint8_t contig_count;
+
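+ /* A node at 'lvl' consumes queues one level below for its children:
+ * rr_num round-robin children plus, when strict priorities are in
+ * use, max_prio + 1 contiguous queues for the priority children.
+ */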
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (lvl == tm_node->hw_lvl) {
+ req->schq[lvl - 1] += tm_node->rr_num;
+ if (tm_node->max_prio != UINT32_MAX) {
+ contig_count = tm_node->max_prio + 1;
+ req->schq_contig[lvl - 1] += contig_count;
+ }
+ }
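+ /* A root at TL2 (i.e. no TL1 access) additionally needs one
+ * contiguous TL2 queue for the root node itself.
+ */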
+ if (lvl == dev->otx2_tm_root_lvl &&
+ dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
+ tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
+ req->schq_contig[dev->otx2_tm_root_lvl]++;
+ }
+ }
+
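+ /* TL1 is reserved for the PF: always request exactly one TL1 queue
+ * and no contiguous (priority) queues at that level.
+ */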
+ req->schq[NIX_TXSCH_LVL_TL1] = 1;
+ req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
+
+ return 0;
+}
+
+static int
+nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
+ struct nix_txsch_alloc_req *req)
+{
+ uint8_t i;
+
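+ /* Walk HW levels top-down from TL1; nix_tm_count_req_schq()
+ * accumulates each level's demand into schq[lvl - 1], i.e. into the
+ * child level's slot.
+ */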
+ for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
+ nix_tm_count_req_schq(dev, req, i);
+
+ for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
+ dev->txschq_index[i] = 0;
+ dev->txschq_contig_index[i] = 0;
+ }
+ return 0;
+}
+
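+/*
+ * Build a txsch alloc mbox request from the SW topology, send it to the
+ * AF and record the granted HW scheduler queue ids against our nodes.
+ */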
+static int
+nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
+
+ rc = nix_tm_prepare_txschq_req(dev, req);
+ if (rc)
+ return rc;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ nix_tm_copy_rsp_to_dev(dev, rsp);
+ dev->link_cfg_lvl = rsp->link_cfg_lvl;
+
+ nix_tm_assign_hw_id(dev);
+ return 0;
+}
+
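+/*
+ * Allocate and program HW scheduler resources for the current topology;
+ * when xmit_enable is set, also bring TX up: refresh MTU-dependent SMQ
+ * config, program SQ scheduling data while SQs are idle, XON the SMQs
+ * and finally enable the SQs.
+ */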
+static int
+nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node;
+ struct otx2_eth_txq *txq;
+ uint16_t sq;
+ int rc;
+
+ nix_tm_update_parent_info(dev);
+
+ rc = nix_tm_send_txsch_alloc_msg(dev);
+ if (rc) {
+ otx2_err("TM failed to alloc tm resources=%d", rc);
+ return rc;
+ }
+
+ rc = nix_tm_txsch_reg_config(dev);
+ if (rc) {
+ otx2_err("TM failed to configure sched registers=%d", rc);
+ return rc;
+ }
+
+ /* Trigger MTU recalculation as SMQ config depends on the MTU */
+ if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
+ rc = otx2_nix_recalc_mtu(eth_dev);
+ if (rc) {
+ otx2_err("TM MTU update failed, rc=%d", rc);
+ return rc;
+ }
+ }
+
+ /* Mark all non-leaf nodes as enabled */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!nix_tm_is_leaf(dev, tm_node->lvl))
+ tm_node->flags |= NIX_TM_NODE_ENABLED;
+ }
+
+ if (!xmit_enable)
+ return 0;
+
+ /* Update SQ scheduling data while SQs are idle */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!nix_tm_is_leaf(dev, tm_node->lvl))
+ continue;
+
+ rc = nix_sq_sched_data(dev, tm_node, false);
+ if (rc) {
+ otx2_err("SQ %u sched update failed, rc=%d",
+ tm_node->id, rc);
+ return rc;
+ }
+ }
+
+ /* Finally XON all SMQs */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+
+ rc = nix_smq_xoff(dev, tm_node, false);
+ if (rc) {
+ otx2_err("Failed to enable smq %u, rc=%d",
+ tm_node->hw_id, rc);
+ return rc;
+ }
+ }
+
+ /* Enable xmit as the whole topology is ready */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!nix_tm_is_leaf(dev, tm_node->lvl))
+ continue;
+
+ sq = tm_node->id;
+ txq = eth_dev->data->tx_queues[sq];
+
+ rc = otx2_nix_sq_enable(txq);
+ if (rc) {
+ otx2_err("TM sw xon failed on SQ %u, rc=%d",
+ tm_node->id, rc);
+ return rc;
+ }
+ tm_node->flags |= NIX_TM_NODE_ENABLED;
+ }
+
+ return 0;
+}
+
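+/* Validate a batched TXSCHQ register config and send it via mbox */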
+static int
+send_tm_reqval(struct otx2_mbox *mbox,
+ struct nix_txschq_config *req,
+ struct rte_tm_error *error)
+{
+ int rc;
+
+ if (!req->num_regs ||
+ req->num_regs > MAX_REGS_PER_MBOX_MSG) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "invalid config";
+ return -EIO;
+ }
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ }
+ return rc;
+}
+
+static uint16_t
+nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
+{
+ if (nix_tm_have_tl1_access(dev)) {
+ switch (lvl) {
+ case OTX2_TM_LVL_ROOT:
+ return NIX_TXSCH_LVL_TL1;
+ case OTX2_TM_LVL_SCH1:
+ return NIX_TXSCH_LVL_TL2;
+ case OTX2_TM_LVL_SCH2:
+ return NIX_TXSCH_LVL_TL3;
+ case OTX2_TM_LVL_SCH3:
+ return NIX_TXSCH_LVL_TL4;
+ case OTX2_TM_LVL_SCH4:
+ return NIX_TXSCH_LVL_SMQ;
+ default:
+ return NIX_TXSCH_LVL_CNT;
+ }
+ } else {
+ switch (lvl) {
+ case OTX2_TM_LVL_ROOT:
+ return NIX_TXSCH_LVL_TL2;
+ case OTX2_TM_LVL_SCH1:
+ return NIX_TXSCH_LVL_TL3;
+ case OTX2_TM_LVL_SCH2:
+ return NIX_TXSCH_LVL_TL4;
+ case OTX2_TM_LVL_SCH3:
+ return NIX_TXSCH_LVL_SMQ;
+ default:
+ return NIX_TXSCH_LVL_CNT;
+ }
+ }
+}
+
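+/*
+ * Map rte_tm SW levels to NIX TXSCH HW levels. With TL1 access the PF
+ * roots the hierarchy at TL1; otherwise it is rooted at TL2.
+ */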
+static uint16_t
+nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
+{
+ if (hw_lvl >= NIX_TXSCH_LVL_CNT)
+ return 0;
+
+ /* MDQ doesn't support SP */
+ if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+ return 0;
+
+ /* PF's TL1 with VFs enabled doesn't support SP */
+ if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
+ (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
+ (dev->tm_flags & NIX_TM_TL1_NO_SP)))
+ return 0;
+
+ return TXSCH_TLX_SP_PRIO_MAX - 1;
+}
+
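+/*
+ * Siblings that share a priority form the single DWRR group a parent
+ * may have; the remaining priorities must be unique and contiguous
+ * from 0. E.g. child priorities {0, 1, 1, 1, 2} are valid (one RR
+ * group at prio 1), while {0, 2} (hole at prio 1) and {0, 0, 1, 1}
+ * (two RR groups) are rejected.
+ */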
+static int
+validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
+ uint32_t parent_id, uint32_t priority,
+ struct rte_tm_error *error)
+{
+ uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
+ struct otx2_nix_tm_node *tm_node;
+ uint32_t rr_num = 0;
+ int i;
+
+ /* Validate priority against max */
+ if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "unsupported priority value";
+ return -EINVAL;
+ }
+
+ if (parent_id == RTE_TM_NODE_ID_NULL)
+ return 0;
+
+ memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
+ priorities[priority] = 1;
+
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!tm_node->parent)
+ continue;
+
+ if (!(tm_node->flags & NIX_TM_NODE_USER))
+ continue;
+
+ if (tm_node->parent->id != parent_id)
+ continue;
+
+ priorities[tm_node->priority]++;
+ }
+
+ for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
+ if (priorities[i] > 1)
+ rr_num++;
+
+ /* At most one RR group per parent */
+ if (rr_num > 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "multiple DWRR node priority";
+ return -EINVAL;
+ }
+
+ /* Check for previous priority to avoid holes in priorities */
+ if (priority && !priorities[priority - 1]) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority not in order";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
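+/* Read one TXSCHQ register back through the AF mbox; req->read = 1
+ * turns the config message into a read request.
+ */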
+static int
+read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
+ uint64_t *regval, uint32_t hw_lvl)
+{
+ volatile struct nix_txschq_config *req;
+ struct nix_txschq_config *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->read = 1;
+ req->lvl = hw_lvl;
+ req->reg[0] = reg;
+ req->num_regs = 1;
+
+ rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
+ if (rc)
+ return rc;
+ *regval = rsp->regval[0];
+ return 0;
+}
+
+/* Search for min rate in topology */
+static void
+nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
+{
+ struct otx2_nix_tm_shaper_profile *profile;
+ uint64_t rate_min = 1E9; /* 1 Gbps */
+
+ TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
+ if (profile->params.peak.rate &&
+ profile->params.peak.rate < rate_min)
+ rate_min = profile->params.peak.rate;
+
+ if (profile->params.committed.rate &&
+ profile->params.committed.rate < rate_min)
+ rate_min = profile->params.committed.rate;
+ }
+
+ dev->tm_rate_min = rate_min;
+}
+
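+/*
+ * Quiesce TX: XON all SMQs so queued packets drain, wait for every SQ
+ * to empty, XOFF and flush the SMQs, then sanity-check the final SQ
+ * head/tail state.
+ */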
+static int
+nix_xmit_disable(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
+ uint16_t sqb_cnt, head_off, tail_off;
+ struct otx2_nix_tm_node *tm_node;
+ struct otx2_eth_txq *txq;
+ uint64_t wdata, val;
+ int i, rc = 0;
+
+ otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
+
+ /* Enable CGX RXTX to drain pkts */
+ if (!eth_dev->data->dev_started) {
+ otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
+ rc = otx2_mbox_process(dev->mbox);
+ if (rc)
+ return rc;
+ }
+
+ /* XON all SMQs */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_smq_xoff(dev, tm_node, false);
+ if (rc) {
+ otx2_err("Failed to enable smq %u, rc=%d",
+ tm_node->hw_id, rc);
+ goto cleanup;
+ }
+ }
+
+ /* Flush all tx queues */
+ for (i = 0; i < sq_cnt; i++) {
+ txq = eth_dev->data->tx_queues[i];
+
+ rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+ if (rc) {
+ otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
+ goto cleanup;
+ }
+
+ /* Wait for sq entries to be flushed */
+ rc = nix_txq_flush_sq_spin(txq);
+ if (rc) {
+ otx2_err("Failed to drain sq, rc=%d\n", rc);
+ goto cleanup;
+ }
+ }
+
+ /* XOFF & Flush all SMQ's. HRM mandates
+ * all SQ's empty before SMQ flush is issued.
+ */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_smq_xoff(dev, tm_node, true);
+ if (rc) {
+ otx2_err("Failed to enable smq %u, rc=%d",
+ tm_node->hw_id, rc);
+ goto cleanup;
+ }
+ }
+
+ /* Verify sanity of all tx queues */
+ for (i = 0; i < sq_cnt; i++) {
+ txq = eth_dev->data->tx_queues[i];
+
+ wdata = ((uint64_t)txq->sq << 32);
+ val = otx2_atomic64_add_nosync(wdata,
+ (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
+
+ sqb_cnt = val & 0xFFFF;
+ head_off = (val >> 20) & 0x3F;
+ tail_off = (val >> 28) & 0x3F;
+
+ if (sqb_cnt > 1 || head_off != tail_off ||
+ (*txq->fc_mem != txq->nb_sqb_bufs))
+ otx2_err("Failed to gracefully flush sq %u", txq->sq);
+ }
+
+cleanup:
+ /* Restore CGX state */
+ if (!eth_dev->data->dev_started) {
+ otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
+ rc |= otx2_mbox_process(dev->mbox);
+ }
+
+ return rc;
+}
+
+static int
+otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node;
+
+ if (is_leaf == NULL) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return -EINVAL;
+ }
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ return -EINVAL;
+ }
+ if (nix_tm_is_leaf(dev, tm_node->lvl))
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+ return 0;
+}
+
+static int
+otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc, max_nr_nodes = 0, i;
+ struct free_rsrcs_rsp *rsp;
+
+ memset(cap, 0, sizeof(*cap));
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+ max_nr_nodes += rsp->schq[i];
+
+ cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
+ /* TL1 level is reserved for PF */
+ cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
+ OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+
+ /* Shaper Capabilities */
+ cap->shaper_private_n_max = max_nr_nodes;
+ cap->shaper_n_max = max_nr_nodes;
+ cap->shaper_private_dual_rate_n_max = max_nr_nodes;
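+ /* rte_tm expresses rates in bytes/sec while MIN/MAX_SHAPER_RATE
+ * are in bits/sec, hence the division by 8.
+ */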
+ cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+ cap->shaper_private_packet_mode_supported = 1;
+ cap->shaper_private_byte_mode_supported = 1;
+ cap->shaper_pkt_length_adjust_min = NIX_LENGTH_ADJUST_MIN;
+ cap->shaper_pkt_length_adjust_max = NIX_LENGTH_ADJUST_MAX;
+
+ /* Schedule Capabilities */
+ cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
+ cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+ cap->sched_wfq_packet_mode_supported = 1;
+ cap->sched_wfq_byte_mode_supported = 1;
+
+ cap->dynamic_update_mask =
+ RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
+ RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
+ cap->stats_mask =
+ RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES |
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+ for (i = 0; i < RTE_COLORS; i++) {
+ cap->mark_vlan_dei_supported[i] = false;
+ cap->mark_ip_ecn_tcp_supported[i] = false;
+ cap->mark_ip_dscp_supported[i] = false;
+ }
+
+ return 0;
+}
+
+static int
+otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct free_rsrcs_rsp *rsp;
+ uint16_t hw_lvl;
+ int rc;
+
+ memset(cap, 0, sizeof(*cap));
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ hw_lvl = nix_tm_lvl2nix(dev, lvl);
+
+ if (nix_tm_is_leaf(dev, lvl)) {
+ /* Leaf */
+ cap->n_nodes_max = dev->tm_leaf_cnt;
+ cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
+ cap->leaf_nodes_identical = 1;
+ cap->leaf.stats_mask =
+ RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+
+ } else if (lvl == OTX2_TM_LVL_ROOT) {
+ /* Root node, i.e. TL1 for PF or TL2 for VF */
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported =
+ !nix_tm_have_tl1_access(dev);
+ cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_packet_mode_supported = 1;
+ cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+ cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+ if (nix_tm_have_tl1_access(dev))
+ cap->nonleaf.stats_mask =
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ } else if ((lvl < OTX2_TM_LVL_MAX) &&
+ (hw_lvl < NIX_TXSCH_LVL_CNT)) {
+ /* TL2, TL3, TL4, MDQ */
+ cap->n_nodes_max = rsp->schq[hw_lvl];
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = true;
+ cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_packet_mode_supported = 1;
+ cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+ /* MDQ's children are the leaf SQs and MDQ doesn't support SP */
+ if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+ cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+ else
+ cap->nonleaf.sched_n_children_max =
+ rsp->schq[hw_lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+ } else {
+ /* unsupported level */
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unsupported level";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_nix_tm_node *tm_node;
+ struct free_rsrcs_rsp *rsp;
+ int rc, hw_lvl, lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ hw_lvl = tm_node->hw_lvl;
+ lvl = tm_node->lvl;
+
+ /* Leaf node */
+ if (nix_tm_is_leaf(dev, lvl)) {
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+ return 0;
+ }
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ /* Non Leaf Shaper */
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported =
+ (hw_lvl != NIX_TXSCH_LVL_TL1);
+ cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+ cap->shaper_private_packet_mode_supported = 1;
+ cap->shaper_private_byte_mode_supported = 1;
+
+ /* Non Leaf Scheduler */
+ if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+ cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+ else
+ cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+
+ cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+ if (hw_lvl == NIX_TXSCH_LVL_TL1)
+ cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ return 0;
+}
+
+static int
+otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
+ uint32_t profile_id,
+ struct rte_tm_shaper_params *params,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_shaper_profile *profile;
+
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+ if (profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "shaper profile ID exist";
+ return -EINVAL;
+ }
+
+ /* Committed rate and burst size can be enabled/disabled */
+ if (params->committed.size || params->committed.rate) {
+ if (params->committed.size < MIN_SHAPER_BURST ||
+ params->committed.size > MAX_SHAPER_BURST) {
+ error->type =
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ return -EINVAL;
+ } else if (!shaper_rate_to_nix(params->committed.rate * 8,
+ NULL, NULL, NULL)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "shaper committed rate invalid";
+ return -EINVAL;
+ }
+ }
+
+ /* Peak rate and burst size can be enabled/disabled */
+ if (params->peak.size || params->peak.rate) {
+ if (params->peak.size < MIN_SHAPER_BURST ||
+ params->peak.size > MAX_SHAPER_BURST) {
+ error->type =
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ return -EINVAL;
+ } else if (!shaper_rate_to_nix(params->peak.rate * 8,
+ NULL, NULL, NULL)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+ error->message = "shaper peak rate invalid";
+ return -EINVAL;
+ }
+ }
+
+ if (params->pkt_length_adjust < NIX_LENGTH_ADJUST_MIN ||
+ params->pkt_length_adjust > NIX_LENGTH_ADJUST_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "length adjust invalid";
+ return -EINVAL;
+ }
+
+ profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
+ sizeof(struct otx2_nix_tm_shaper_profile), 0);
+ if (!profile)
+ return -ENOMEM;
+
+ profile->shaper_profile_id = profile_id;
+ rte_memcpy(&profile->params, params,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
+
+ otx2_tm_dbg("Added TM shaper profile %u, "
+ " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
+ ", cbs %" PRIu64 " , adj %u, pkt mode %d",
+ profile_id,
+ params->peak.rate * 8,
+ params->peak.size,
+ params->committed.rate * 8,
+ params->committed.size,
+ params->pkt_length_adjust,
+ params->packet_mode);
+
+ /* Translate rate as bits per second */
+ profile->params.peak.rate = profile->params.peak.rate * 8;
+ profile->params.committed.rate = profile->params.committed.rate * 8;
+ /* Always use PIR for single rate shaping */
+ if (!params->peak.rate && params->committed.rate) {
+ profile->params.peak = profile->params.committed;
+ memset(&profile->params.committed, 0,
+ sizeof(profile->params.committed));
+ }
+
+ /* update min rate */
+ nix_tm_shaper_profile_update_min(dev);
+ return 0;
+}
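+
+/*
+ * Hypothetical usage sketch (application side; the port id, profile id,
+ * rate and burst below are made up): shape to ~10 Mbps with a 4 KB
+ * burst. rte_tm expects bytes/sec; the *8 translation to bits/sec for
+ * the NIX shaper happens above.
+ *
+ * struct rte_tm_shaper_params sp = {
+ * .peak = { .rate = 1250000, .size = 4096 },
+ * .pkt_length_adjust = 24,
+ * };
+ * rte_tm_shaper_profile_add(port_id, 10, &sp, &err);
+ */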
+
+static int
+otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
+ uint32_t profile_id,
+ struct rte_tm_error *error)
+{
+ struct otx2_nix_tm_shaper_profile *profile;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+
+ if (!profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "shaper profile ID not exist";
+ return -EINVAL;
+ }
+
+ if (profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "shaper profile in use";
+ return -EINVAL;
+ }
+
+ otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
+ TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
+ rte_free(profile);
+
+ /* update min rate */
+ nix_tm_shaper_profile_update_min(dev);
+ return 0;
+}
+
+static int
+otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t lvl,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_shaper_profile *profile = NULL;
+ struct otx2_nix_tm_node *parent_node;
+ int rc, pkt_mode, clear_on_fail = 0;
+ uint32_t exp_next_lvl, i;
+ uint32_t profile_id;
+ uint16_t hw_lvl;
+
+ /* we don't support dynamic updates */
+ if (dev->tm_flags & NIX_TM_COMMITTED) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "dynamic update not supported";
+ return -EIO;
+ }
+
+ /* Leaf nodes must all have the same (zero) priority */
+ if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "queue shapers must be priority 0";
+ return -EIO;
+ }
+
+ parent_node = nix_tm_node_search(dev, parent_node_id, true);
+
+ /* find the right level */
+ if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ lvl = OTX2_TM_LVL_ROOT;
+ } else if (parent_node) {
+ lvl = parent_node->lvl + 1;
+ } else {
+ /* Neither a proper parent nor a proper level id given */
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "invalid parent node id";
+ return -ERANGE;
+ }
+ }
+
+ /* Translate rte_tm level id's to nix hw level id's */
+ hw_lvl = nix_tm_lvl2nix(dev, lvl);
+ if (hw_lvl == NIX_TXSCH_LVL_CNT &&
+ !nix_tm_is_leaf(dev, lvl)) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "invalid level id";
+ return -ERANGE;
+ }
+
+ if (node_id < dev->tm_leaf_cnt)
+ exp_next_lvl = NIX_TXSCH_LVL_SMQ;
+ else
+ exp_next_lvl = hw_lvl + 1;
+
+ /* Non-root nodes must have a parent at the expected HW level */
+ if (hw_lvl != dev->otx2_tm_root_lvl &&
+ (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "invalid parent node id";
+ return -EINVAL;
+ }
+
+ /* Check if a node already exists */
+ if (nix_tm_node_search(dev, node_id, true)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node already exists";
+ return -EINVAL;
+ }
+
+ if (!nix_tm_is_leaf(dev, lvl)) {
+ /* Check if shaper profile exists for non leaf node */
+ profile_id = params->shaper_profile_id;
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+ if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && !profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "invalid shaper profile";
+ return -EINVAL;
+ }
+
+ /* Minimum static priority count is 1 */
+ if (!params->nonleaf.n_sp_priorities ||
+ params->nonleaf.n_sp_priorities > TXSCH_TLX_SP_PRIO_MAX) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "invalid sp priorities";
+ return -EINVAL;
+ }
+
+ pkt_mode = 0;
+ /* Validate weight mode */
+ for (i = 0; i < params->nonleaf.n_sp_priorities &&
+ params->nonleaf.wfq_weight_mode; i++) {
+ pkt_mode = !params->nonleaf.wfq_weight_mode[i];
+ if (pkt_mode == !params->nonleaf.wfq_weight_mode[0])
+ continue;
+
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "unsupported weight mode";
+ return -EINVAL;
+ }
+
+ if (profile && params->nonleaf.n_sp_priorities &&
+ pkt_mode != profile->params.packet_mode) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "shaper wfq packet mode mismatch";
+ return -EINVAL;
+ }
+ }
+
+ /* Reject a second DWRR group among siblings or holes in priorities */
+ if (validate_prio(dev, lvl, parent_node_id, priority, error))
+ return -EINVAL;
+
+ if (weight > MAX_SCHED_WEIGHT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "max weight exceeded";
+ return -EINVAL;
+ }
+
+ rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
+ priority, weight, hw_lvl,
+ lvl, true, params);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ /* cleanup user added nodes */
+ if (clear_on_fail)
+ nix_tm_free_resources(dev, NIX_TM_NODE_USER,
+ NIX_TM_NODE_USER, false);
+ error->message = "failed to add node";
+ return rc;
+ }
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+ return 0;
+}
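+
+/*
+ * Hypothetical usage sketch (application side; node ids are made up).
+ * The driver expects a node at every level down to SMQ, so a full chain
+ * is added before committing; leaf node ids double as SQ numbers.
+ *
+ * rte_tm_node_add(port, 100, RTE_TM_NODE_ID_NULL, 0, 1,
+ * RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
+ * ... one rte_tm_node_add() per intermediate scheduler level ...
+ * rte_tm_node_add(port, 0, smq_node_id, 0, 1,
+ * RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
+ * rte_tm_hierarchy_commit(port, 1, &err);
+ */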
+
+static int
+otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node, *child_node;
+ struct otx2_nix_tm_shaper_profile *profile;
+ uint32_t profile_id;
+
+ /* we don't support dynamic updates yet */
+ if (dev->tm_flags & NIX_TM_COMMITTED) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "hierarchy exists";
+ return -EIO;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* Check for any existing children */
+ TAILQ_FOREACH(child_node, &dev->node_list, node) {
+ if (child_node->parent == tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "children exist";
+ return -EINVAL;
+ }
+ }
+
+ /* Remove shaper profile reference, if any */
+ profile_id = tm_node->params.shaper_profile_id;
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+ if (profile)
+ profile->reference_count--;
+
+ TAILQ_REMOVE(&dev->node_list, tm_node, node);
+ rte_free(tm_node);
+ return 0;
+}
+
+static int
+nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error, bool suspend)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_nix_tm_node *tm_node;
+ struct nix_txschq_config *req;
+ uint16_t flags;
+ int rc;
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "hierarchy doesn't exist";
+ return -EINVAL;
+ }
+
+ flags = tm_node->flags;
+ flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
+ (flags | NIX_TM_NODE_ENABLED);
+
+ if (tm_node->flags == flags)
+ return 0;
+
+ /* send mbox for state change */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+
+ req->lvl = tm_node->hw_lvl;
+ req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
+ req->reg, req->regval);
+ rc = send_tm_reqval(mbox, req, error);
+ if (!rc)
+ tm_node->flags = flags;
+ return rc;
+}
+
+static int
+otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
+}
+
+static int
+otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
+}
+
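+/*
+ * Commit the topology built via the node/profile calls above: allocate
+ * HW scheduler queues for it and, if requested, enable transmit.
+ */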
+static int
+otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node;
+ uint32_t leaf_cnt = 0;
+ int rc;