X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Focteontx2%2Fotx2_tm.c;h=8ed059549f59b31e3a7dd7c14c7baf019b4ec669;hb=6dc83230b43b1a69603f61e55ddc4e5905336365;hp=cdd7c9b79e9fbdcd94c32ed046cf32935778e630;hpb=2746e76b2a0c0825f2fca7a969db79cebf39316f;p=dpdk.git diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c index cdd7c9b79e..8ed059549f 100644 --- a/drivers/net/octeontx2/otx2_tm.c +++ b/drivers/net/octeontx2/otx2_tm.c @@ -28,8 +28,8 @@ uint64_t shaper2regval(struct shaper_params *shaper) (shaper->mantissa << 1); } -static int -nix_get_link(struct otx2_eth_dev *dev) +int +otx2_nix_get_link(struct otx2_eth_dev *dev) { int link = 13 /* SDP */; uint16_t lmac_chan; @@ -237,6 +237,30 @@ shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile, &pir->burst_mantissa); } +static void +shaper_default_red_algo(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + struct otx2_nix_tm_shaper_profile *profile) +{ + struct shaper_params cir, pir; + + /* C0 doesn't support STALL when both PIR & CIR are enabled */ + if (profile && otx2_dev_is_96xx_Cx(dev)) { + memset(&cir, 0, sizeof(cir)); + memset(&pir, 0, sizeof(pir)); + shaper_config_to_nix(profile, &cir, &pir); + + if (pir.rate && cir.rate) { + tm_node->red_algo = NIX_REDALG_DISCARD; + tm_node->flags |= NIX_TM_NODE_RED_DISCARD; + return; + } + } + + tm_node->red_algo = NIX_REDALG_STD; + tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD; +} + static int populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq) { @@ -531,10 +555,13 @@ populate_tm_reg(struct otx2_eth_dev *dev, switch (hw_lvl) { case NIX_TXSCH_LVL_SMQ: - /* Set xoff which will be cleared later */ + /* Set xoff which will be cleared later and minimum length + * which will be used for zero padding if packet length is + * smaller + */ reg[k] = NIX_AF_SMQX_CFG(schq); - regval[k] = BIT_ULL(50); - regval_mask[k] = ~BIT_ULL(50); + regval[k] = BIT_ULL(50) | NIX_MIN_HW_FRS; + regval_mask[k] = ~(BIT_ULL(50) | 0x7f); k++; /* Parent and schedule conf */ @@ -574,7 +601,7 @@ populate_tm_reg(struct otx2_eth_dev *dev, if (!otx2_dev_is_sdp(dev) && dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) { reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, - nix_get_link(dev)); + otx2_nix_get_link(dev)); regval[k] = BIT_ULL(12) | nix_get_relchan(dev); k++; } @@ -594,7 +621,7 @@ populate_tm_reg(struct otx2_eth_dev *dev, if (!otx2_dev_is_sdp(dev) && dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) { reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, - nix_get_link(dev)); + otx2_nix_get_link(dev)); regval[k] = BIT_ULL(12) | nix_get_relchan(dev); k++; } @@ -744,7 +771,6 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id, { struct otx2_nix_tm_shaper_profile *profile; struct otx2_nix_tm_node *tm_node, *parent_node; - struct shaper_params cir, pir; uint32_t profile_id; profile_id = params->shaper_profile_id; @@ -778,19 +804,9 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id, if (profile) profile->reference_count++; - memset(&cir, 0, sizeof(cir)); - memset(&pir, 0, sizeof(pir)); - shaper_config_to_nix(profile, &cir, &pir); - tm_node->parent = parent_node; tm_node->parent_hw_id = UINT32_MAX; - /* C0 doesn't support STALL when both PIR & CIR are enabled */ - if (lvl < OTX2_TM_LVL_QUEUE && - otx2_dev_is_96xx_Cx(dev) && - pir.rate && cir.rate) - tm_node->red_algo = NIX_REDALG_DISCARD; - else - tm_node->red_algo = NIX_REDALG_STD; + shaper_default_red_algo(dev, tm_node, profile); TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node); @@ -990,6 +1006,7 @@ 
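/*
 * A hedged aside on the masked-write convention used above: when a
 * nix_txschq_config request carries a regval_mask, the AF is expected to
 * apply it read-modify-write style, so only the bits cleared in the mask
 * change. For the NIX_AF_SMQX_CFG write, that means bit 50 (SW XOFF) and
 * the 7-bit minimum-frame field are updated while every other SMQ
 * setting is preserved:
 *
 *   static uint64_t apply_masked_write(uint64_t old, uint64_t regval,
 *                                      uint64_t regval_mask)
 *   {
 *       // keep bits where the mask is set, overlay the new value
 *       return (old & regval_mask) | regval;
 *   }
 */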
nix_txq_flush_sq_spin(struct otx2_eth_txq *txq) return 0; exit: + otx2_nix_tm_dump(dev); return -EFAULT; } @@ -1540,6 +1557,28 @@ nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable) return 0; } +static int +send_tm_reqval(struct otx2_mbox *mbox, + struct nix_txschq_config *req, + struct rte_tm_error *error) +{ + int rc; + + if (!req->num_regs || + req->num_regs > MAX_REGS_PER_MBOX_MSG) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "invalid config"; + return -EIO; + } + + rc = otx2_mbox_process(mbox); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + } + return rc; +} + static uint16_t nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl) { @@ -1652,133 +1691,328 @@ validate_prio(struct otx2_eth_dev *dev, uint32_t lvl, } static int -otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id, - uint32_t parent_node_id, uint32_t priority, - uint32_t weight, uint32_t lvl, - struct rte_tm_node_params *params, - struct rte_tm_error *error) +read_tm_reg(struct otx2_mbox *mbox, uint64_t reg, + uint64_t *regval, uint32_t hw_lvl) { - struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); - struct otx2_nix_tm_node *parent_node; - int rc, clear_on_fail = 0; - uint32_t exp_next_lvl; - uint16_t hw_lvl; + volatile struct nix_txschq_config *req; + struct nix_txschq_config *rsp; + int rc; - /* we don't support dynamic updates */ - if (dev->tm_flags & NIX_TM_COMMITTED) { - error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; - error->message = "dynamic update not supported"; - return -EIO; + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->read = 1; + req->lvl = hw_lvl; + req->reg[0] = reg; + req->num_regs = 1; + + rc = otx2_mbox_process_msg(mbox, (void **)&rsp); + if (rc) + return rc; + *regval = rsp->regval[0]; + return 0; +} + +/* Search for min rate in topology */ +static void +nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev) +{ + struct otx2_nix_tm_shaper_profile *profile; + uint64_t rate_min = 1E9; /* 1 Gbps */ + + TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) { + if (profile->params.peak.rate && + profile->params.peak.rate < rate_min) + rate_min = profile->params.peak.rate; + + if (profile->params.committed.rate && + profile->params.committed.rate < rate_min) + rate_min = profile->params.committed.rate; } - /* Leaf nodes have to be same priority */ - if (nix_tm_is_leaf(dev, lvl) && priority != 0) { - error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; - error->message = "queue shapers must be priority 0"; - return -EIO; + dev->tm_rate_min = rate_min; +} + +static int +nix_xmit_disable(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t sq_cnt = eth_dev->data->nb_tx_queues; + uint16_t sqb_cnt, head_off, tail_off; + struct otx2_nix_tm_node *tm_node; + struct otx2_eth_txq *txq; + uint64_t wdata, val; + int i, rc; + + otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name); + + /* Enable CGX RXTX to drain pkts */ + if (!eth_dev->data->dev_started) { + otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox); + rc = otx2_mbox_process(dev->mbox); + if (rc) + return rc; } - parent_node = nix_tm_node_search(dev, parent_node_id, true); + /* XON all SMQ's */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ) + continue; + if (!(tm_node->flags & NIX_TM_NODE_HWRES)) + continue; - /* find the right level */ - if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) { - if (parent_node_id == RTE_TM_NODE_ID_NULL) { - lvl = 
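/*
 * Usage pattern for send_tm_reqval() introduced above (illustrative,
 * mirroring the call sites later in this patch): fill a txschq config
 * message with one of the prepare_tm_* helpers and let send_tm_reqval()
 * reject an empty or oversized register list before the mailbox call:
 *
 *   struct nix_txschq_config *req;
 *
 *   req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
 *   req->lvl = tm_node->hw_lvl;
 *   req->num_regs = prepare_tm_sw_xoff(tm_node, true,
 *                                      req->reg, req->regval);
 *   rc = send_tm_reqval(mbox, req, error);  // -EIO if num_regs is bad
 */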
OTX2_TM_LVL_ROOT; - } else if (parent_node) { - lvl = parent_node->lvl + 1; - } else { - /* Neigher proper parent nor proper level id given */ - error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; - error->message = "invalid parent node id"; - return -ERANGE; + rc = nix_smq_xoff(dev, tm_node, false); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->hw_id, rc); + goto cleanup; } } - /* Translate rte_tm level id's to nix hw level id's */ - hw_lvl = nix_tm_lvl2nix(dev, lvl); - if (hw_lvl == NIX_TXSCH_LVL_CNT && - !nix_tm_is_leaf(dev, lvl)) { - error->type = RTE_TM_ERROR_TYPE_LEVEL_ID; - error->message = "invalid level id"; - return -ERANGE; + /* Flush all tx queues */ + for (i = 0; i < sq_cnt; i++) { + txq = eth_dev->data->tx_queues[i]; + + rc = otx2_nix_sq_sqb_aura_fc(txq, false); + if (rc) { + otx2_err("Failed to disable sqb aura fc, rc=%d", rc); + goto cleanup; + } + + /* Wait for sq entries to be flushed */ + rc = nix_txq_flush_sq_spin(txq); + if (rc) { + otx2_err("Failed to drain sq, rc=%d\n", rc); + goto cleanup; + } } - if (node_id < dev->tm_leaf_cnt) - exp_next_lvl = NIX_TXSCH_LVL_SMQ; - else - exp_next_lvl = hw_lvl + 1; + /* XOFF & Flush all SMQ's. HRM mandates + * all SQ's empty before SMQ flush is issued. + */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ) + continue; + if (!(tm_node->flags & NIX_TM_NODE_HWRES)) + continue; - /* Check if there is no parent node yet */ - if (hw_lvl != dev->otx2_tm_root_lvl && - (!parent_node || parent_node->hw_lvl != exp_next_lvl)) { - error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; - error->message = "invalid parent node id"; - return -EINVAL; + rc = nix_smq_xoff(dev, tm_node, true); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->hw_id, rc); + goto cleanup; + } } - /* Check if a node already exists */ - if (nix_tm_node_search(dev, node_id, true)) { - error->type = RTE_TM_ERROR_TYPE_NODE_ID; - error->message = "node already exists"; - return -EINVAL; + /* Verify sanity of all tx queues */ + for (i = 0; i < sq_cnt; i++) { + txq = eth_dev->data->tx_queues[i]; + + wdata = ((uint64_t)txq->sq << 32); + val = otx2_atomic64_add_nosync(wdata, + (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS)); + + sqb_cnt = val & 0xFFFF; + head_off = (val >> 20) & 0x3F; + tail_off = (val >> 28) & 0x3F; + + if (sqb_cnt > 1 || head_off != tail_off || + (*txq->fc_mem != txq->nb_sqb_bufs)) + otx2_err("Failed to gracefully flush sq %u", txq->sq); } - /* Check if shaper profile exists for non leaf node */ - if (!nix_tm_is_leaf(dev, lvl) && - params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && - !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) { - error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; - error->message = "invalid shaper profile"; - return -EINVAL; +cleanup: + /* restore cgx state */ + if (!eth_dev->data->dev_started) { + otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox); + rc |= otx2_mbox_process(dev->mbox); } - /* Check if there is second DWRR already in siblings or holes in prio */ - if (validate_prio(dev, lvl, parent_node_id, priority, error)) + return rc; +} + +static int +otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + + if (is_leaf == NULL) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; return -EINVAL; + } - if (weight > MAX_SCHED_WEIGHT) { - error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT; - 
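/*
 * A small decode sketch for the NIX_LF_SQ_OP_STATUS word read in
 * nix_xmit_disable() above (field layout taken from the shifts used
 * there):
 *
 *   static void decode_sq_status(uint64_t val, uint16_t *sqb_cnt,
 *                                uint16_t *head_off, uint16_t *tail_off)
 *   {
 *       *sqb_cnt  = val & 0xFFFF;         // bits 15:0, SQBs in use
 *       *head_off = (val >> 20) & 0x3F;   // bits 25:20
 *       *tail_off = (val >> 28) & 0x3F;   // bits 33:28
 *   }
 *
 *   // A gracefully drained SQ holds at most one SQB, head equals tail
 *   // and all SQB buffers are back (*txq->fc_mem == txq->nb_sqb_bufs).
 */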
error->message = "max weight exceeded"; + tm_node = nix_tm_node_search(dev, node_id, true); + if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; return -EINVAL; } + if (nix_tm_is_leaf(dev, tm_node->lvl)) + *is_leaf = true; + else + *is_leaf = false; + return 0; +} - rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id, - priority, weight, hw_lvl, - lvl, true, params); +static int +otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + int rc, max_nr_nodes = 0, i; + struct free_rsrcs_rsp *rsp; + + memset(cap, 0, sizeof(*cap)); + + otx2_mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); if (rc) { error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - /* cleanup user added nodes */ - if (clear_on_fail) - nix_tm_free_resources(dev, NIX_TM_NODE_USER, - NIX_TM_NODE_USER, false); - error->message = "failed to add node"; + error->message = "unexpected fatal error"; return rc; } - error->type = RTE_TM_ERROR_TYPE_NONE; + + for (i = 0; i < NIX_TXSCH_LVL_TL1; i++) + max_nr_nodes += rsp->schq[i]; + + cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt; + /* TL1 level is reserved for PF */ + cap->n_levels_max = nix_tm_have_tl1_access(dev) ? + OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1; + cap->non_leaf_nodes_identical = 1; + cap->leaf_nodes_identical = 1; + + /* Shaper Capabilities */ + cap->shaper_private_n_max = max_nr_nodes; + cap->shaper_n_max = max_nr_nodes; + cap->shaper_private_dual_rate_n_max = max_nr_nodes; + cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8; + cap->shaper_pkt_length_adjust_min = 0; + cap->shaper_pkt_length_adjust_max = 0; + + /* Schedule Capabilities */ + cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ]; + cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX; + cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max; + cap->sched_wfq_n_groups_max = 1; + cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT; + + cap->dynamic_update_mask = + RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL | + RTE_TM_UPDATE_NODE_SUSPEND_RESUME; + cap->stats_mask = + RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES | + RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + + for (i = 0; i < RTE_COLORS; i++) { + cap->mark_vlan_dei_supported[i] = false; + cap->mark_ip_ecn_tcp_supported[i] = false; + cap->mark_ip_dscp_supported[i] = false; + } + return 0; } static int -otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id, - struct rte_tm_error *error) +otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) { struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); - struct otx2_nix_tm_node *tm_node, *child_node; - struct otx2_nix_tm_shaper_profile *profile; - uint32_t profile_id; + struct otx2_mbox *mbox = dev->mbox; + struct free_rsrcs_rsp *rsp; + uint16_t hw_lvl; + int rc; - /* we don't support dynamic updates yet */ - if (dev->tm_flags & NIX_TM_COMMITTED) { - error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; - error->message = "hierarchy exists"; - return -EIO; + memset(cap, 0, sizeof(*cap)); + + otx2_mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + return rc; } - if 
(node_id == RTE_TM_NODE_ID_NULL) { - error->type = RTE_TM_ERROR_TYPE_NODE_ID; - error->message = "invalid node id"; - return -EINVAL; + hw_lvl = nix_tm_lvl2nix(dev, lvl); + + if (nix_tm_is_leaf(dev, lvl)) { + /* Leaf */ + cap->n_nodes_max = dev->tm_leaf_cnt; + cap->n_nodes_leaf_max = dev->tm_leaf_cnt; + cap->leaf_nodes_identical = 1; + cap->leaf.stats_mask = + RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES; + + } else if (lvl == OTX2_TM_LVL_ROOT) { + /* Root node, aka TL2(vf)/TL1(pf) */ + cap->n_nodes_max = 1; + cap->n_nodes_nonleaf_max = 1; + cap->non_leaf_nodes_identical = 1; + + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = + nix_tm_have_tl1_access(dev) ? false : true; + cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8; + + cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1]; + cap->nonleaf.sched_sp_n_priorities_max = + nix_max_prio(dev, hw_lvl) + 1; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT; + + if (nix_tm_have_tl1_access(dev)) + cap->nonleaf.stats_mask = + RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + } else if ((lvl < OTX2_TM_LVL_MAX) && + (hw_lvl < NIX_TXSCH_LVL_CNT)) { + /* TL2, TL3, TL4, MDQ */ + cap->n_nodes_max = rsp->schq[hw_lvl]; + cap->n_nodes_nonleaf_max = cap->n_nodes_max; + cap->non_leaf_nodes_identical = 1; + + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = true; + cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8; + + /* MDQ doesn't support Strict Priority */ + if (hw_lvl == NIX_TXSCH_LVL_MDQ) + cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt; + else + cap->nonleaf.sched_n_children_max = + rsp->schq[hw_lvl - 1]; + cap->nonleaf.sched_sp_n_priorities_max = + nix_max_prio(dev, hw_lvl) + 1; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT; + } else { + /* unsupported level */ + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + return rc; } + return 0; +} + +static int +otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct free_rsrcs_rsp *rsp; + int rc, hw_lvl, lvl; + + memset(cap, 0, sizeof(*cap)); tm_node = nix_tm_node_search(dev, node_id, true); if (!tm_node) { @@ -1787,28 +2021,744 @@ otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id, return -EINVAL; } - /* Check for any existing children */ - TAILQ_FOREACH(child_node, &dev->node_list, node) { - if (child_node->parent == tm_node) { - error->type = RTE_TM_ERROR_TYPE_NODE_ID; - error->message = "children exist"; - return -EINVAL; - } + hw_lvl = tm_node->hw_lvl; + lvl = tm_node->lvl; + + /* Leaf node */ + if (nix_tm_is_leaf(dev, lvl)) { + cap->stats_mask = RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES; + return 0; } - /* Remove shaper profile reference */ - profile_id = tm_node->params.shaper_profile_id; - profile = nix_tm_shaper_profile_search(dev, profile_id); + otx2_mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + return rc; + } + + /* Non Leaf Shaper */ + 
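/*
 * For orientation, a hedged sketch of the rte_tm level to NIX txsch
 * level mapping that nix_tm_lvl2nix() implements, inferred from the
 * tree builders later in this patch (TL1 is reserved for the PF, so a
 * root without TL1 access starts one level lower):
 *
 *   //  rte_tm level       with TL1 access    without TL1 access
 *   //  OTX2_TM_LVL_ROOT   NIX_TXSCH_LVL_TL1  NIX_TXSCH_LVL_TL2
 *   //  OTX2_TM_LVL_SCH1   NIX_TXSCH_LVL_TL2  NIX_TXSCH_LVL_TL3
 *   //  OTX2_TM_LVL_SCH2   NIX_TXSCH_LVL_TL3  NIX_TXSCH_LVL_TL4
 *   //  OTX2_TM_LVL_SCH3   NIX_TXSCH_LVL_TL4  NIX_TXSCH_LVL_SMQ
 *   //  OTX2_TM_LVL_SCH4   NIX_TXSCH_LVL_SMQ  leaf (queue)
 *   //  OTX2_TM_LVL_QUEUE  leaf (queue)       -
 */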
cap->shaper_private_supported = true; + cap->shaper_private_dual_rate_supported = + (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true; + cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8; + + /* Non Leaf Scheduler */ + if (hw_lvl == NIX_TXSCH_LVL_MDQ) + cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt; + else + cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1]; + + cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = + cap->nonleaf.sched_n_children_max; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT; + + if (hw_lvl == NIX_TXSCH_LVL_TL1) + cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + return 0; +} + +static int +otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, + uint32_t profile_id, + struct rte_tm_shaper_params *params, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_shaper_profile *profile; + + profile = nix_tm_shaper_profile_search(dev, profile_id); + if (profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "shaper profile ID exist"; + return -EINVAL; + } + + /* Committed rate and burst size can be enabled/disabled */ + if (params->committed.size || params->committed.rate) { + if (params->committed.size < MIN_SHAPER_BURST || + params->committed.size > MAX_SHAPER_BURST) { + error->type = + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE; + return -EINVAL; + } else if (!shaper_rate_to_nix(params->committed.rate * 8, + NULL, NULL, NULL)) { + error->type = + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE; + error->message = "shaper committed rate invalid"; + return -EINVAL; + } + } + + /* Peak rate and burst size can be enabled/disabled */ + if (params->peak.size || params->peak.rate) { + if (params->peak.size < MIN_SHAPER_BURST || + params->peak.size > MAX_SHAPER_BURST) { + error->type = + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE; + return -EINVAL; + } else if (!shaper_rate_to_nix(params->peak.rate * 8, + NULL, NULL, NULL)) { + error->type = + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE; + error->message = "shaper peak rate invalid"; + return -EINVAL; + } + } + + profile = rte_zmalloc("otx2_nix_tm_shaper_profile", + sizeof(struct otx2_nix_tm_shaper_profile), 0); + if (!profile) + return -ENOMEM; + + profile->shaper_profile_id = profile_id; + rte_memcpy(&profile->params, params, + sizeof(struct rte_tm_shaper_params)); + TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper); + + otx2_tm_dbg("Added TM shaper profile %u, " + " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64 + ", cbs %" PRIu64 " , adj %u", + profile_id, + params->peak.rate * 8, + params->peak.size, + params->committed.rate * 8, + params->committed.size, + params->pkt_length_adjust); + + /* Translate rate as bits per second */ + profile->params.peak.rate = profile->params.peak.rate * 8; + profile->params.committed.rate = profile->params.committed.rate * 8; + /* Always use PIR for single rate shaping */ + if (!params->peak.rate && params->committed.rate) { + profile->params.peak = profile->params.committed; + memset(&profile->params.committed, 0, + sizeof(profile->params.committed)); + } + + /* update min rate */ + nix_tm_shaper_profile_update_min(dev); + return 0; +} + +static int +otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev, + uint32_t profile_id, + struct rte_tm_error 
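/*
 * Worked example of the "always use PIR for single rate shaping" rule
 * coded above: a profile carrying only a committed rate is folded onto
 * the peak shaper after the bytes-to-bits translation:
 *
 *   struct rte_tm_shaper_params p = {
 *       .committed = { .rate = 125000000, .size = 4096 }, // 1 Gbit/s
 *   };
 *   // After otx2_nix_tm_shaper_profile_add():
 *   //   profile->params.peak.rate      == 1000000000 (bits/sec)
 *   //   profile->params.peak.size      == 4096
 *   //   profile->params.committed.rate == 0
 */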
*error) +{ + struct otx2_nix_tm_shaper_profile *profile; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + profile = nix_tm_shaper_profile_search(dev, profile_id); + + if (!profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "shaper profile ID not exist"; + return -EINVAL; + } + + if (profile->reference_count) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "shaper profile in use"; + return -EINVAL; + } + + otx2_tm_dbg("Removing TM shaper profile %u", profile_id); + TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper); + rte_free(profile); + + /* update min rate */ + nix_tm_shaper_profile_update_min(dev); + return 0; +} + +static int +otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t lvl, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *parent_node; + int rc, clear_on_fail = 0; + uint32_t exp_next_lvl; + uint16_t hw_lvl; + + /* we don't support dynamic updates */ + if (dev->tm_flags & NIX_TM_COMMITTED) { + error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; + error->message = "dynamic update not supported"; + return -EIO; + } + + /* Leaf nodes have to be same priority */ + if (nix_tm_is_leaf(dev, lvl) && priority != 0) { + error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; + error->message = "queue shapers must be priority 0"; + return -EIO; + } + + parent_node = nix_tm_node_search(dev, parent_node_id, true); + + /* find the right level */ + if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) { + if (parent_node_id == RTE_TM_NODE_ID_NULL) { + lvl = OTX2_TM_LVL_ROOT; + } else if (parent_node) { + lvl = parent_node->lvl + 1; + } else { + /* Neigher proper parent nor proper level id given */ + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "invalid parent node id"; + return -ERANGE; + } + } + + /* Translate rte_tm level id's to nix hw level id's */ + hw_lvl = nix_tm_lvl2nix(dev, lvl); + if (hw_lvl == NIX_TXSCH_LVL_CNT && + !nix_tm_is_leaf(dev, lvl)) { + error->type = RTE_TM_ERROR_TYPE_LEVEL_ID; + error->message = "invalid level id"; + return -ERANGE; + } + + if (node_id < dev->tm_leaf_cnt) + exp_next_lvl = NIX_TXSCH_LVL_SMQ; + else + exp_next_lvl = hw_lvl + 1; + + /* Check if there is no parent node yet */ + if (hw_lvl != dev->otx2_tm_root_lvl && + (!parent_node || parent_node->hw_lvl != exp_next_lvl)) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "invalid parent node id"; + return -EINVAL; + } + + /* Check if a node already exists */ + if (nix_tm_node_search(dev, node_id, true)) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "node already exists"; + return -EINVAL; + } + + /* Check if shaper profile exists for non leaf node */ + if (!nix_tm_is_leaf(dev, lvl) && + params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && + !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "invalid shaper profile"; + return -EINVAL; + } + + /* Check if there is second DWRR already in siblings or holes in prio */ + if (validate_prio(dev, lvl, parent_node_id, priority, error)) + return -EINVAL; + + if (weight > MAX_SCHED_WEIGHT) { + error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT; + error->message = "max weight exceeded"; + return -EINVAL; + } + + rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id, + priority, 
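/*
 * Application-side sketch for this node_add implementation (standard
 * rte_tm calls; the node ids and the use of the driver's OTX2_TM_LVL_*
 * values are illustrative): build the root first, walk down level by
 * level, then attach leaves whose node ids equal the Tx queue ids
 * (node_id < dev->tm_leaf_cnt):
 *
 *   struct rte_tm_node_params np = {
 *       .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *   };
 *   struct rte_tm_error err;
 *
 *   rte_tm_node_add(port, 100, RTE_TM_NODE_ID_NULL, 0, 1,
 *                   OTX2_TM_LVL_ROOT, &np, &err);
 *   rte_tm_node_add(port, 101, 100, 0, 1, OTX2_TM_LVL_SCH1, &np, &err);
 *   // ... continue down to the SMQ level, attach one leaf per Tx
 *   // queue, then rte_tm_hierarchy_commit(port, 1, &err);
 */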
weight, hw_lvl, + lvl, true, params); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + /* cleanup user added nodes */ + if (clear_on_fail) + nix_tm_free_resources(dev, NIX_TM_NODE_USER, + NIX_TM_NODE_USER, false); + error->message = "failed to add node"; + return rc; + } + error->type = RTE_TM_ERROR_TYPE_NONE; + return 0; +} + +static int +otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node, *child_node; + struct otx2_nix_tm_shaper_profile *profile; + uint32_t profile_id; + + /* we don't support dynamic updates yet */ + if (dev->tm_flags & NIX_TM_COMMITTED) { + error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; + error->message = "hierarchy exists"; + return -EIO; + } + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Check for any existing children */ + TAILQ_FOREACH(child_node, &dev->node_list, node) { + if (child_node->parent == tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "children exist"; + return -EINVAL; + } + } + + /* Remove shaper profile reference */ + profile_id = tm_node->params.shaper_profile_id; + profile = nix_tm_shaper_profile_search(dev, profile_id); profile->reference_count--; - TAILQ_REMOVE(&dev->node_list, tm_node, node); - rte_free(tm_node); + TAILQ_REMOVE(&dev->node_list, tm_node, node); + rte_free(tm_node); + return 0; +} + +static int +nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error, bool suspend) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct nix_txschq_config *req; + uint16_t flags; + int rc; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + if (!(dev->tm_flags & NIX_TM_COMMITTED)) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy doesn't exist"; + return -EINVAL; + } + + flags = tm_node->flags; + flags = suspend ? 
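/*
 * What suspend/resume amounts to (hedged sketch; the register details
 * live in prepare_tm_sw_xoff() elsewhere in this file): suspend sets
 * the SW_XOFF bit of the node's scheduling queue so its parent stops
 * picking it, resume clears the bit; software only tracks the change
 * through NIX_TM_NODE_ENABLED:
 *
 *   // suspend: flags &= ~NIX_TM_NODE_ENABLED;  SW_XOFF <- 1
 *   // resume:  flags |=  NIX_TM_NODE_ENABLED;  SW_XOFF <- 0
 */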
(flags & ~NIX_TM_NODE_ENABLED) : + (flags | NIX_TM_NODE_ENABLED); + + if (tm_node->flags == flags) + return 0; + + /* send mbox for state change */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + + req->lvl = tm_node->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node, suspend, + req->reg, req->regval); + rc = send_tm_reqval(mbox, req, error); + if (!rc) + tm_node->flags = flags; + return rc; +} + +static int +otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + return nix_tm_node_suspend_resume(eth_dev, node_id, error, true); +} + +static int +otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + return nix_tm_node_suspend_resume(eth_dev, node_id, error, false); +} + +static int +otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + uint32_t leaf_cnt = 0; + int rc; + + if (dev->tm_flags & NIX_TM_COMMITTED) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy exists"; + return -EINVAL; + } + + /* Check if we have all the leaf nodes */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->flags & NIX_TM_NODE_USER && + tm_node->id < dev->tm_leaf_cnt) + leaf_cnt++; + } + + if (leaf_cnt != dev->tm_leaf_cnt) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "incomplete hierarchy"; + return -EINVAL; + } + + /* + * Disable xmit will be enabled when + * new topology is available. + */ + rc = nix_xmit_disable(eth_dev); + if (rc) { + otx2_err("failed to disable TX, rc=%d", rc); + return -EIO; + } + + /* Delete default/ratelimit tree */ + if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) { + rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "failed to free default resources"; + return rc; + } + dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE | + NIX_TM_RATE_LIMIT_TREE); + } + + /* Free up user alloc'ed resources */ + rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, + NIX_TM_NODE_USER, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "failed to free user resources"; + return rc; + } + + rc = nix_tm_alloc_resources(eth_dev, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "alloc resources failed"; + /* TODO should we restore default config ? 
*/ + if (clear_on_fail) + nix_tm_free_resources(dev, 0, 0, false); + return rc; + } + + error->type = RTE_TM_ERROR_TYPE_NONE; + dev->tm_flags |= NIX_TM_COMMITTED; + return 0; +} + +static int +otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, + uint32_t node_id, + uint32_t profile_id, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_shaper_profile *profile = NULL; + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct nix_txschq_config *req; + uint8_t k; + int rc; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node"; + return -EINVAL; + } + + if (profile_id == tm_node->params.shaper_profile_id) + return 0; + + if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + profile = nix_tm_shaper_profile_search(dev, profile_id); + if (!profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "shaper profile ID not exist"; + return -EINVAL; + } + } + + tm_node->params.shaper_profile_id = profile_id; + + /* Nothing to do if not yet committed */ + if (!(dev->tm_flags & NIX_TM_COMMITTED)) + return 0; + + tm_node->flags &= ~NIX_TM_NODE_ENABLED; + + /* Flush the specific node with SW_XOFF */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = tm_node->hw_lvl; + k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval); + req->num_regs = k; + + rc = send_tm_reqval(mbox, req, error); + if (rc) + return rc; + + shaper_default_red_algo(dev, tm_node, profile); + + /* Update the PIR/CIR and clear SW XOFF */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = tm_node->hw_lvl; + + k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval); + + k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]); + + req->num_regs = k; + rc = send_tm_reqval(mbox, req, error); + if (!rc) + tm_node->flags |= NIX_TM_NODE_ENABLED; + return rc; +} + +static int +otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, + uint32_t node_id, uint32_t new_parent_id, + uint32_t priority, uint32_t weight, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node, *sibling; + struct otx2_nix_tm_node *new_parent; + struct nix_txschq_config *req; + uint8_t k; + int rc; + + if (!(dev->tm_flags & NIX_TM_COMMITTED)) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy doesn't exist"; + return -EINVAL; + } + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Parent id valid only for non root nodes */ + if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) { + new_parent = nix_tm_node_search(dev, new_parent_id, true); + if (!new_parent) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "no such parent node"; + return -EINVAL; + } + + /* Current support is only for dynamic weight update */ + if (tm_node->parent != new_parent || + tm_node->priority != priority) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "only weight update supported"; + return -EINVAL; + } + } + + /* Skip if no change */ + if (tm_node->weight == weight) + return 0; + + tm_node->weight = weight; + + /* For leaf nodes, SQ CTX needs update */ + if (nix_tm_is_leaf(dev, tm_node->lvl)) { + /* Update SQ quantum data on 
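   (a hedged aside: the SQ context carries an RR quantum derived from
    the rte_tm weight, roughly
        rr_quantum = (weight * NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT;
    so a weight-only change just rewrites that one field)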
the fly */ + rc = nix_sq_sched_data(dev, tm_node, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "sq sched data update failed"; + return rc; + } + } else { + /* XOFF Parent node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->parent->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XOFF this node and all other siblings */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + + k = 0; + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + k += prepare_tm_sw_xoff(sibling, true, &req->reg[k], + &req->regval[k]); + } + req->num_regs = k; + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* Update new weight for current node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + req->num_regs = prepare_tm_sched_reg(dev, tm_node, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XON this node and all other siblings */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + + k = 0; + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + k += prepare_tm_sw_xoff(sibling, false, &req->reg[k], + &req->regval[k]); + } + req->num_regs = k; + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XON Parent node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->parent->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + } return 0; } +static int +otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, int clear, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + uint64_t reg, val; + int64_t *addr; + int rc = 0; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Stats support only for leaf node or TL1 root */ + if (nix_tm_is_leaf(dev, tm_node->lvl)) { + reg = (((uint64_t)tm_node->id) << 32); + + /* Packets */ + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->n_pkts = val - tm_node->last_pkts; + + /* Bytes */ + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->n_bytes = val - tm_node->last_bytes; + + if (clear) { + tm_node->last_pkts = stats->n_pkts; + tm_node->last_bytes = stats->n_bytes; + } + + *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES; + + } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "stats read error"; + + /* RED Drop packets */ + reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id); + rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1); + if (rc) + goto exit; + stats->leaf.n_pkts_dropped[RTE_COLOR_RED] = + val - tm_node->last_pkts; + + /* RED Drop bytes */ + reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id); + rc = read_tm_reg(dev->mbox, reg, 
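/*
 * Leaf-side counterpart of the TL1 register read happening here, as a
 * sketch (mirrors the leaf branch above): SQ counters are fetched with
 * an atomic add-of-zero where the upper word selects the SQ, and
 * "clear" is emulated by remembering the previous reading:
 *
 *   uint64_t sel = (uint64_t)tm_node->id << 32;
 *   uint64_t pkts = otx2_atomic64_add_nosync(sel,
 *                   (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS));
 *   if (pkts & OP_ERR)
 *       pkts = 0;
 *   stats->n_pkts = pkts - tm_node->last_pkts; // delta since last clear
 */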
&val, NIX_TXSCH_LVL_TL1); + if (rc) + goto exit; + stats->leaf.n_bytes_dropped[RTE_COLOR_RED] = + val - tm_node->last_bytes; + + /* Clear stats */ + if (clear) { + tm_node->last_pkts = + stats->leaf.n_pkts_dropped[RTE_COLOR_RED]; + tm_node->last_bytes = + stats->leaf.n_bytes_dropped[RTE_COLOR_RED]; + } + + *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + + } else { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "unsupported node"; + rc = -EINVAL; + } + +exit: + return rc; +} + const struct rte_tm_ops otx2_tm_ops = { + .node_type_get = otx2_nix_tm_node_type_get, + + .capabilities_get = otx2_nix_tm_capa_get, + .level_capabilities_get = otx2_nix_tm_level_capa_get, + .node_capabilities_get = otx2_nix_tm_node_capa_get, + + .shaper_profile_add = otx2_nix_tm_shaper_profile_add, + .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete, + .node_add = otx2_nix_tm_node_add, .node_delete = otx2_nix_tm_node_delete, + .node_suspend = otx2_nix_tm_node_suspend, + .node_resume = otx2_nix_tm_node_resume, + .hierarchy_commit = otx2_nix_tm_hierarchy_commit, + + .node_shaper_update = otx2_nix_tm_node_shaper_update, + .node_parent_update = otx2_nix_tm_node_parent_update, + .node_stats_read = otx2_nix_tm_node_stats_read, }; static int @@ -1956,6 +2906,260 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev) return 0; } +static int +nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint32_t def = eth_dev->data->nb_tx_queues; + struct rte_tm_node_params params; + uint32_t leaf_parent, i, rc = 0; + + memset(¶ms, 0, sizeof(params)); + + if (nix_tm_have_tl1_access(dev)) { + dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1; + rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL1, + OTX2_TM_LVL_ROOT, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL2, + OTX2_TM_LVL_SCH1, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL3, + OTX2_TM_LVL_SCH2, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL4, + OTX2_TM_LVL_SCH3, false, ¶ms); + if (rc) + goto error; + leaf_parent = def + 3; + + /* Add per queue SMQ nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i, + leaf_parent, + 0, DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_SMQ, + OTX2_TM_LVL_SCH4, + false, ¶ms); + if (rc) + goto error; + } + + /* Add leaf nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, i, + leaf_parent + 1 + i, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_CNT, + OTX2_TM_LVL_QUEUE, + false, ¶ms); + if (rc) + goto error; + } + + return 0; + } + + dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2; + rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2, + OTX2_TM_LVL_ROOT, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3, + OTX2_TM_LVL_SCH1, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4, + OTX2_TM_LVL_SCH2, false, ¶ms); + if (rc) + goto error; + leaf_parent = def + 2; + + /* Add per queue SMQ nodes */ + for (i = 0; i < 
eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i, + leaf_parent, + 0, DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_SMQ, + OTX2_TM_LVL_SCH3, + false, ¶ms); + if (rc) + goto error; + } + + /* Add leaf nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_CNT, + OTX2_TM_LVL_SCH4, + false, ¶ms); + if (rc) + break; + } +error: + return rc; +} + +static int +otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev, + struct otx2_nix_tm_node *tm_node, + uint64_t tx_rate) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_shaper_profile profile; + struct otx2_mbox *mbox = dev->mbox; + volatile uint64_t *reg, *regval; + struct nix_txschq_config *req; + uint16_t flags; + uint8_t k = 0; + int rc; + + flags = tm_node->flags; + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = NIX_TXSCH_LVL_MDQ; + reg = req->reg; + regval = req->regval; + + if (tx_rate == 0) { + k += prepare_tm_sw_xoff(tm_node, true, ®[k], ®val[k]); + flags &= ~NIX_TM_NODE_ENABLED; + goto exit; + } + + if (!(flags & NIX_TM_NODE_ENABLED)) { + k += prepare_tm_sw_xoff(tm_node, false, ®[k], ®val[k]); + flags |= NIX_TM_NODE_ENABLED; + } + + /* Use only PIR for rate limit */ + memset(&profile, 0, sizeof(profile)); + profile.params.peak.rate = tx_rate; + /* Minimum burst of ~4us Bytes of Tx */ + profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS, + (4ull * tx_rate) / (1E6 * 8)); + if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate) + dev->tm_rate_min = tx_rate; + + k += prepare_tm_shaper_reg(tm_node, &profile, ®[k], ®val[k]); +exit: + req->num_regs = k; + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + tm_node->flags = flags; + return 0; +} + +int +otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t tx_rate_mbps) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6; + struct otx2_nix_tm_node *tm_node; + int rc; + + /* Check for supported revisions */ + if (otx2_dev_is_95xx_Ax(dev) || + otx2_dev_is_96xx_Ax(dev)) + return -EINVAL; + + if (queue_idx >= eth_dev->data->nb_tx_queues) + return -EINVAL; + + if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) && + !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE)) + goto error; + + if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) && + eth_dev->data->nb_tx_queues > 1) { + /* For TM topology change ethdev needs to be stopped */ + if (eth_dev->data->dev_started) + return -EBUSY; + + /* + * Disable xmit will be enabled when + * new topology is available. 
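    (Burst sizing in otx2_nix_tm_rate_limit_mdq() above, worked out:
     peak.size = max(NIX_MAX_HW_FRS, 4us of traffic), where 4us of
     traffic is (4 * tx_rate) / (1E6 * 8) bytes since tx_rate is in
     bits per second; e.g. 10 Gbit/s gives 4e-6 * 1e10 / 8 = 5000
     bytes of burst.)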
+ */ + rc = nix_xmit_disable(eth_dev); + if (rc) { + otx2_err("failed to disable TX, rc=%d", rc); + return -EIO; + } + + rc = nix_tm_free_resources(dev, 0, 0, false); + if (rc < 0) { + otx2_tm_dbg("failed to free default resources, rc %d", + rc); + return -EIO; + } + + rc = nix_tm_prepare_rate_limited_tree(eth_dev); + if (rc < 0) { + otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc); + return rc; + } + + rc = nix_tm_alloc_resources(eth_dev, true); + if (rc != 0) { + otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc); + return rc; + } + + dev->tm_flags &= ~NIX_TM_DEFAULT_TREE; + dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE; + } + + tm_node = nix_tm_node_search(dev, queue_idx, false); + + /* check if we found a valid leaf node */ + if (!tm_node || + !nix_tm_is_leaf(dev, tm_node->lvl) || + !tm_node->parent || + tm_node->parent->hw_id == UINT32_MAX) + return -EIO; + + return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate); +error: + otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags); + return -EINVAL; +} + +int +otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + if (!arg) + return -EINVAL; + + /* Check for supported revisions */ + if (otx2_dev_is_95xx_Ax(dev) || + otx2_dev_is_96xx_Ax(dev)) + return -EINVAL; + + *(const void **)arg = &otx2_tm_ops; + + return 0; +} + int otx2_nix_tm_fini(struct rte_eth_dev *eth_dev) {
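/*
 * For reference, how applications reach the entry points added in this
 * patch (standard ethdev/rte_tm calls, nothing driver-specific):
 *
 *   // Per-queue Tx rate limit in Mbit/s, routed to
 *   // otx2_nix_tm_set_queue_rate_limit():
 *   rte_eth_set_queue_rate_limit(port_id, queue_idx, 1000);
 *
 *   // Full traffic-manager control; the rte_tm_* wrappers resolve
 *   // otx2_tm_ops through otx2_nix_tm_ops_get():
 *   struct rte_tm_capabilities cap;
 *   struct rte_tm_error err;
 *   rte_tm_capabilities_get(port_id, &cap, &err);
 */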