X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Focteontx2%2Fotx2_tm.c;h=fdd56697f1c049c6bfc4343e1f393e1cdab1ee5f;hb=f30e69b41f94;hp=12903ec1b8dcef8530355b70721371f56333395e;hpb=d1d823e7a8652b4df81ff4e0f47a38263618979c;p=dpdk.git diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c index 12903ec1b8..fdd56697f1 100644 --- a/drivers/net/octeontx2/otx2_tm.c +++ b/drivers/net/octeontx2/otx2_tm.c @@ -28,8 +28,8 @@ uint64_t shaper2regval(struct shaper_params *shaper) (shaper->mantissa << 1); } -static int -nix_get_link(struct otx2_eth_dev *dev) +int +otx2_nix_get_link(struct otx2_eth_dev *dev) { int link = 13 /* SDP */; uint16_t lmac_chan; @@ -59,8 +59,16 @@ static bool nix_tm_have_tl1_access(struct otx2_eth_dev *dev) { bool is_lbk = otx2_dev_is_lbk(dev); - return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && - !is_lbk && !dev->maxvf; + return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk; +} + +static bool +nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl) +{ + if (nix_tm_have_tl1_access(dev)) + return (lvl == OTX2_TM_LVL_QUEUE); + + return (lvl == OTX2_TM_LVL_SCH4); } static int @@ -94,52 +102,50 @@ nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id) } static inline uint64_t -shaper_rate_to_nix(uint64_t cclk_hz, uint64_t cclk_ticks, - uint64_t value, uint64_t *exponent_p, +shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p, uint64_t *mantissa_p, uint64_t *div_exp_p) { uint64_t div_exp, exponent, mantissa; /* Boundary checks */ - if (value < MIN_SHAPER_RATE(cclk_hz, cclk_ticks) || - value > MAX_SHAPER_RATE(cclk_hz, cclk_ticks)) + if (value < MIN_SHAPER_RATE || + value > MAX_SHAPER_RATE) return 0; - if (value <= SHAPER_RATE(cclk_hz, cclk_ticks, 0, 0, 0)) { + if (value <= SHAPER_RATE(0, 0, 0)) { /* Calculate rate div_exp and mantissa using * the following formula: * - * value = (cclk_hz * (256 + mantissa) - * / ((cclk_ticks << div_exp) * 256) + * value = (2E6 * (256 + mantissa) + * / ((1 << div_exp) * 256)) */ div_exp = 0; exponent = 0; mantissa = MAX_RATE_MANTISSA; - while (value < (cclk_hz / (cclk_ticks << div_exp))) + while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp))) div_exp += 1; while (value < - ((cclk_hz * (256 + mantissa)) / - ((cclk_ticks << div_exp) * 256))) + ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) / + ((1 << div_exp) * 256))) mantissa -= 1; } else { /* Calculate rate exponent and mantissa using * the following formula: * - * value = (cclk_hz * ((256 + mantissa) << exponent) - * / (cclk_ticks * 256) + * value = (2E6 * ((256 + mantissa) << exponent)) / 256 * */ div_exp = 0; exponent = MAX_RATE_EXPONENT; mantissa = MAX_RATE_MANTISSA; - while (value < (cclk_hz * (1 << exponent)) / cclk_ticks) + while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent))) exponent -= 1; - while (value < (cclk_hz * ((256 + mantissa) << exponent)) / - (cclk_ticks * 256)) + while (value < ((NIX_SHAPER_RATE_CONST * + ((256 + mantissa) << exponent)) / 256)) mantissa -= 1; } @@ -155,20 +161,7 @@ shaper_rate_to_nix(uint64_t cclk_hz, uint64_t cclk_ticks, *mantissa_p = mantissa; /* Calculate real rate value */ - return SHAPER_RATE(cclk_hz, cclk_ticks, exponent, mantissa, div_exp); -} - -static inline uint64_t -lx_shaper_rate_to_nix(uint64_t cclk_hz, uint32_t hw_lvl, - uint64_t value, uint64_t *exponent, - uint64_t *mantissa, uint64_t *div_exp) -{ - if (hw_lvl == NIX_TXSCH_LVL_TL1) - return shaper_rate_to_nix(cclk_hz, L1_TIME_WHEEL_CCLK_TICKS, - value, exponent, mantissa, div_exp); - else - return 
shaper_rate_to_nix(cclk_hz, LX_TIME_WHEEL_CCLK_TICKS, - value, exponent, mantissa, div_exp); + return SHAPER_RATE(exponent, mantissa, div_exp); } static inline uint64_t @@ -207,307 +200,485 @@ shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p, return SHAPER_BURST(exponent, mantissa); } -static int -configure_shaper_cir_pir_reg(struct otx2_eth_dev *dev, - struct otx2_nix_tm_node *tm_node, - struct shaper_params *cir, - struct shaper_params *pir) -{ - uint32_t shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; - struct otx2_nix_tm_shaper_profile *shaper_profile = NULL; - struct rte_tm_shaper_params *param; - - shaper_profile_id = tm_node->params.shaper_profile_id; - - shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id); - if (shaper_profile) { - param = &shaper_profile->profile; - /* Calculate CIR exponent and mantissa */ - if (param->committed.rate) - cir->rate = lx_shaper_rate_to_nix(CCLK_HZ, - tm_node->hw_lvl_id, - param->committed.rate, - &cir->exponent, - &cir->mantissa, - &cir->div_exp); - - /* Calculate PIR exponent and mantissa */ - if (param->peak.rate) - pir->rate = lx_shaper_rate_to_nix(CCLK_HZ, - tm_node->hw_lvl_id, - param->peak.rate, - &pir->exponent, - &pir->mantissa, - &pir->div_exp); - - /* Calculate CIR burst exponent and mantissa */ - if (param->committed.size) - cir->burst = shaper_burst_to_nix(param->committed.size, - &cir->burst_exponent, - &cir->burst_mantissa); - - /* Calculate PIR burst exponent and mantissa */ - if (param->peak.size) - pir->burst = shaper_burst_to_nix(param->peak.size, - &pir->burst_exponent, - &pir->burst_mantissa); - } - - return 0; +static void +shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile, + struct shaper_params *cir, + struct shaper_params *pir) +{ + struct rte_tm_shaper_params *param = &profile->params; + + if (!profile) + return; + + /* Calculate CIR exponent and mantissa */ + if (param->committed.rate) + cir->rate = shaper_rate_to_nix(param->committed.rate, + &cir->exponent, + &cir->mantissa, + &cir->div_exp); + + /* Calculate PIR exponent and mantissa */ + if (param->peak.rate) + pir->rate = shaper_rate_to_nix(param->peak.rate, + &pir->exponent, + &pir->mantissa, + &pir->div_exp); + + /* Calculate CIR burst exponent and mantissa */ + if (param->committed.size) + cir->burst = shaper_burst_to_nix(param->committed.size, + &cir->burst_exponent, + &cir->burst_mantissa); + + /* Calculate PIR burst exponent and mantissa */ + if (param->peak.size) + pir->burst = shaper_burst_to_nix(param->peak.size, + &pir->burst_exponent, + &pir->burst_mantissa); } -static int -send_tm_reqval(struct otx2_mbox *mbox, struct nix_txschq_config *req) +static void +shaper_default_red_algo(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + struct otx2_nix_tm_shaper_profile *profile) { - int rc; + struct shaper_params cir, pir; - if (req->num_regs > MAX_REGS_PER_MBOX_MSG) - return -ERANGE; + /* C0 doesn't support STALL when both PIR & CIR are enabled */ + if (profile && otx2_dev_is_96xx_Cx(dev)) { + memset(&cir, 0, sizeof(cir)); + memset(&pir, 0, sizeof(pir)); + shaper_config_to_nix(profile, &cir, &pir); - rc = otx2_mbox_process(mbox); - if (rc) - return rc; + if (pir.rate && cir.rate) { + tm_node->red_algo = NIX_REDALG_DISCARD; + tm_node->flags |= NIX_TM_NODE_RED_DISCARD; + return; + } + } - req->num_regs = 0; - return 0; + tm_node->red_algo = NIX_REDALG_STD; + tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD; } static int -populate_tm_registers(struct otx2_eth_dev *dev, - struct otx2_nix_tm_node *tm_node) 
+populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq) { - uint64_t strict_schedul_prio, rr_prio; struct otx2_mbox *mbox = dev->mbox; - volatile uint64_t *reg, *regval; - uint64_t parent = 0, child = 0; - struct shaper_params cir, pir; struct nix_txschq_config *req; + + /* + * Default config for TL1. + * For VF this is always ignored. + */ + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = NIX_TXSCH_LVL_TL1; + + /* Set DWRR quantum */ + req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); + req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; + req->num_regs++; + + req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); + req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); + req->num_regs++; + + req->reg[2] = NIX_AF_TL1X_CIR(schq); + req->regval[2] = 0; + req->num_regs++; + + return otx2_mbox_process(mbox); +} + +static uint8_t +prepare_tm_sched_reg(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + volatile uint64_t *reg, volatile uint64_t *regval) +{ + uint64_t strict_prio = tm_node->priority; + uint32_t hw_lvl = tm_node->hw_lvl; + uint32_t schq = tm_node->hw_id; uint64_t rr_quantum; - uint32_t hw_lvl; - uint32_t schq; - int rc; + uint8_t k = 0; + + rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight); + + /* For children to root, strict prio is default if either + * device root is TL2 or TL1 Static Priority is disabled. + */ + if (hw_lvl == NIX_TXSCH_LVL_TL2 && + (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 || + dev->tm_flags & NIX_TM_TL1_NO_SP)) + strict_prio = TXSCH_TL1_DFLT_RR_PRIO; + + otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, " + "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)", + nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl, + tm_node->id, strict_prio, rr_quantum, tm_node); + + switch (hw_lvl) { + case NIX_TXSCH_LVL_SMQ: + reg[k] = NIX_AF_MDQX_SCHEDULE(schq); + regval[k] = (strict_prio << 24) | rr_quantum; + k++; + + break; + case NIX_TXSCH_LVL_TL4: + reg[k] = NIX_AF_TL4X_SCHEDULE(schq); + regval[k] = (strict_prio << 24) | rr_quantum; + k++; + + break; + case NIX_TXSCH_LVL_TL3: + reg[k] = NIX_AF_TL3X_SCHEDULE(schq); + regval[k] = (strict_prio << 24) | rr_quantum; + k++; + + break; + case NIX_TXSCH_LVL_TL2: + reg[k] = NIX_AF_TL2X_SCHEDULE(schq); + regval[k] = (strict_prio << 24) | rr_quantum; + k++; + + break; + case NIX_TXSCH_LVL_TL1: + reg[k] = NIX_AF_TL1X_SCHEDULE(schq); + regval[k] = rr_quantum; + k++; + + break; + } + + return k; +} + +static uint8_t +prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node, + struct otx2_nix_tm_shaper_profile *profile, + volatile uint64_t *reg, volatile uint64_t *regval) +{ + struct shaper_params cir, pir; + uint32_t schq = tm_node->hw_id; + uint64_t adjust = 0; + uint8_t k = 0; memset(&cir, 0, sizeof(cir)); memset(&pir, 0, sizeof(pir)); + shaper_config_to_nix(profile, &cir, &pir); + + /* Packet length adjust */ + if (tm_node->pkt_mode) + adjust = 1; + else if (profile) + adjust = profile->params.pkt_length_adjust & 0x1FF; + + otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, pir %" PRIu64 + "(%" PRIu64 "B), cir %" PRIu64 "(%" PRIu64 "B)" + "adjust 0x%" PRIx64 "(pktmode %u) (%p)", + nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl, + tm_node->id, pir.rate, pir.burst, cir.rate, cir.burst, + adjust, tm_node->pkt_mode, tm_node); + + switch (tm_node->hw_lvl) { + case NIX_TXSCH_LVL_SMQ: + /* Configure PIR, CIR */ + reg[k] = NIX_AF_MDQX_PIR(schq); + regval[k] = (pir.rate && pir.burst) ? + (shaper2regval(&pir) | 1) : 0; + k++; + + reg[k] = NIX_AF_MDQX_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? 
+ (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure RED ALG */ + reg[k] = NIX_AF_MDQX_SHAPE(schq); + regval[k] = (adjust | + (uint64_t)tm_node->red_algo << 9 | + (uint64_t)tm_node->pkt_mode << 24); + k++; + break; + case NIX_TXSCH_LVL_TL4: + /* Configure PIR, CIR */ + reg[k] = NIX_AF_TL4X_PIR(schq); + regval[k] = (pir.rate && pir.burst) ? + (shaper2regval(&pir) | 1) : 0; + k++; + + reg[k] = NIX_AF_TL4X_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? + (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure RED algo */ + reg[k] = NIX_AF_TL4X_SHAPE(schq); + regval[k] = (adjust | + (uint64_t)tm_node->red_algo << 9 | + (uint64_t)tm_node->pkt_mode << 24); + k++; + break; + case NIX_TXSCH_LVL_TL3: + /* Configure PIR, CIR */ + reg[k] = NIX_AF_TL3X_PIR(schq); + regval[k] = (pir.rate && pir.burst) ? + (shaper2regval(&pir) | 1) : 0; + k++; + + reg[k] = NIX_AF_TL3X_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? + (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure RED algo */ + reg[k] = NIX_AF_TL3X_SHAPE(schq); + regval[k] = (adjust | + (uint64_t)tm_node->red_algo << 9 | + (uint64_t)tm_node->pkt_mode << 24); + k++; - /* Skip leaf nodes */ - if (tm_node->hw_lvl_id == NIX_TXSCH_LVL_CNT) - return 0; + break; + case NIX_TXSCH_LVL_TL2: + /* Configure PIR, CIR */ + reg[k] = NIX_AF_TL2X_PIR(schq); + regval[k] = (pir.rate && pir.burst) ? + (shaper2regval(&pir) | 1) : 0; + k++; + + reg[k] = NIX_AF_TL2X_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? + (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure RED algo */ + reg[k] = NIX_AF_TL2X_SHAPE(schq); + regval[k] = (adjust | + (uint64_t)tm_node->red_algo << 9 | + (uint64_t)tm_node->pkt_mode << 24); + k++; + + break; + case NIX_TXSCH_LVL_TL1: + /* Configure CIR */ + reg[k] = NIX_AF_TL1X_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? 
+ (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure length disable and adjust */ + reg[k] = NIX_AF_TL1X_SHAPE(schq); + regval[k] = (adjust | + (uint64_t)tm_node->pkt_mode << 24); + k++; + break; + } + + return k; +} + +static uint8_t +prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable, + volatile uint64_t *reg, volatile uint64_t *regval) +{ + uint32_t hw_lvl = tm_node->hw_lvl; + uint32_t schq = tm_node->hw_id; + uint8_t k = 0; + + otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)", + nix_hwlvl2str(hw_lvl), schq, tm_node->lvl, + tm_node->id, enable, tm_node); + + regval[k] = enable; + + switch (hw_lvl) { + case NIX_TXSCH_LVL_MDQ: + reg[k] = NIX_AF_MDQX_SW_XOFF(schq); + k++; + break; + case NIX_TXSCH_LVL_TL4: + reg[k] = NIX_AF_TL4X_SW_XOFF(schq); + k++; + break; + case NIX_TXSCH_LVL_TL3: + reg[k] = NIX_AF_TL3X_SW_XOFF(schq); + k++; + break; + case NIX_TXSCH_LVL_TL2: + reg[k] = NIX_AF_TL2X_SW_XOFF(schq); + k++; + break; + case NIX_TXSCH_LVL_TL1: + reg[k] = NIX_AF_TL1X_SW_XOFF(schq); + k++; + break; + default: + break; + } + + return k; +} + +static int +populate_tm_reg(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node) +{ + struct otx2_nix_tm_shaper_profile *profile; + uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG]; + uint64_t regval[MAX_REGS_PER_MBOX_MSG]; + uint64_t reg[MAX_REGS_PER_MBOX_MSG]; + struct otx2_mbox *mbox = dev->mbox; + uint64_t parent = 0, child = 0; + uint32_t hw_lvl, rr_prio, schq; + struct nix_txschq_config *req; + int rc = -EFAULT; + uint8_t k = 0; + + memset(regval_mask, 0, sizeof(regval_mask)); + profile = nix_tm_shaper_profile_search(dev, + tm_node->params.shaper_profile_id); + rr_prio = tm_node->rr_prio; + hw_lvl = tm_node->hw_lvl; + schq = tm_node->hw_id; /* Root node will not have a parent node */ - if (tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) + if (hw_lvl == dev->otx2_tm_root_lvl) parent = tm_node->parent_hw_id; else parent = tm_node->parent->hw_id; /* Do we need this trigger to configure TL1 */ if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 && - tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) { - schq = parent; - /* - * Default config for TL1. - * For VF this is always ignored. 
- */ - - req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); - req->lvl = NIX_TXSCH_LVL_TL1; - - /* Set DWRR quantum */ - req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); - req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; - req->num_regs++; - - req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); - req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); - req->num_regs++; - - req->reg[2] = NIX_AF_TL1X_CIR(schq); - req->regval[2] = 0; - req->num_regs++; - - rc = send_tm_reqval(mbox, req); + hw_lvl == dev->otx2_tm_root_lvl) { + rc = populate_tm_tl1_default(dev, parent); if (rc) goto error; } - if (tm_node->hw_lvl_id != NIX_TXSCH_LVL_SMQ) + if (hw_lvl != NIX_TXSCH_LVL_SMQ) child = find_prio_anchor(dev, tm_node->id); - rr_prio = tm_node->rr_prio; - hw_lvl = tm_node->hw_lvl_id; - strict_schedul_prio = tm_node->priority; - schq = tm_node->hw_id; - rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX) / - MAX_SCHED_WEIGHT; - - configure_shaper_cir_pir_reg(dev, tm_node, &cir, &pir); - - otx2_tm_dbg("Configure node %p, lvl %u hw_lvl %u, id %u, hw_id %u," - "parent_hw_id %" PRIx64 ", pir %" PRIx64 ", cir %" PRIx64, - tm_node, tm_node->level_id, hw_lvl, - tm_node->id, schq, parent, pir.rate, cir.rate); + /* Override default rr_prio when TL1 + * Static Priority is disabled + */ + if (hw_lvl == NIX_TXSCH_LVL_TL1 && + dev->tm_flags & NIX_TM_TL1_NO_SP) { + rr_prio = TXSCH_TL1_DFLT_RR_PRIO; + child = 0; + } - rc = -EFAULT; + otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u" + " prio_anchor %"PRIu64" rr_prio %u (%p)", + nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1), + parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node); + /* Prepare Topology and Link config */ switch (hw_lvl) { case NIX_TXSCH_LVL_SMQ: - req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); - req->lvl = hw_lvl; - reg = req->reg; - regval = req->regval; - req->num_regs = 0; - - /* Set xoff which will be cleared later */ - *reg++ = NIX_AF_SMQX_CFG(schq); - *regval++ = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) | - (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS; - req->num_regs++; - *reg++ = NIX_AF_MDQX_PARENT(schq); - *regval++ = parent << 16; - req->num_regs++; - *reg++ = NIX_AF_MDQX_SCHEDULE(schq); - *regval++ = (strict_schedul_prio << 24) | rr_quantum; - req->num_regs++; - if (pir.rate && pir.burst) { - *reg++ = NIX_AF_MDQX_PIR(schq); - *regval++ = shaper2regval(&pir) | 1; - req->num_regs++; - } - if (cir.rate && cir.burst) { - *reg++ = NIX_AF_MDQX_CIR(schq); - *regval++ = shaper2regval(&cir) | 1; - req->num_regs++; - } + /* Set xoff which will be cleared later and minimum length + * which will be used for zero padding if packet length is + * smaller + */ + reg[k] = NIX_AF_SMQX_CFG(schq); + regval[k] = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) | + NIX_MIN_HW_FRS; + regval_mask[k] = ~(BIT_ULL(50) | (0x7ULL << 36) | 0x7f); + k++; + + /* Parent and schedule conf */ + reg[k] = NIX_AF_MDQX_PARENT(schq); + regval[k] = parent << 16; + k++; - rc = send_tm_reqval(mbox, req); - if (rc) - goto error; break; case NIX_TXSCH_LVL_TL4: - req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); - req->lvl = hw_lvl; - req->num_regs = 0; - reg = req->reg; - regval = req->regval; - - *reg++ = NIX_AF_TL4X_PARENT(schq); - *regval++ = parent << 16; - req->num_regs++; - *reg++ = NIX_AF_TL4X_TOPOLOGY(schq); - *regval++ = (child << 32) | (rr_prio << 1); - req->num_regs++; - *reg++ = NIX_AF_TL4X_SCHEDULE(schq); - *regval++ = (strict_schedul_prio << 24) | rr_quantum; - req->num_regs++; - if (pir.rate && pir.burst) { - *reg++ = NIX_AF_TL4X_PIR(schq); - *regval++ = 
shaper2regval(&pir) | 1; - req->num_regs++; - } - if (cir.rate && cir.burst) { - *reg++ = NIX_AF_TL4X_CIR(schq); - *regval++ = shaper2regval(&cir) | 1; - req->num_regs++; + /* Parent and schedule conf */ + reg[k] = NIX_AF_TL4X_PARENT(schq); + regval[k] = parent << 16; + k++; + + reg[k] = NIX_AF_TL4X_TOPOLOGY(schq); + regval[k] = (child << 32) | (rr_prio << 1); + k++; + + /* Configure TL4 to send to SDP channel instead of CGX/LBK */ + if (otx2_dev_is_sdp(dev)) { + reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq); + regval[k] = BIT_ULL(12); + k++; } - - rc = send_tm_reqval(mbox, req); - if (rc) - goto error; break; case NIX_TXSCH_LVL_TL3: - req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); - req->lvl = hw_lvl; - req->num_regs = 0; - reg = req->reg; - regval = req->regval; - - *reg++ = NIX_AF_TL3X_PARENT(schq); - *regval++ = parent << 16; - req->num_regs++; - *reg++ = NIX_AF_TL3X_TOPOLOGY(schq); - *regval++ = (child << 32) | (rr_prio << 1); - req->num_regs++; - *reg++ = NIX_AF_TL3X_SCHEDULE(schq); - *regval++ = (strict_schedul_prio << 24) | rr_quantum; - req->num_regs++; - if (pir.rate && pir.burst) { - *reg++ = NIX_AF_TL3X_PIR(schq); - *regval++ = shaper2regval(&pir) | 1; - req->num_regs++; - } - if (cir.rate && cir.burst) { - *reg++ = NIX_AF_TL3X_CIR(schq); - *regval++ = shaper2regval(&cir) | 1; - req->num_regs++; + /* Parent and schedule conf */ + reg[k] = NIX_AF_TL3X_PARENT(schq); + regval[k] = parent << 16; + k++; + + reg[k] = NIX_AF_TL3X_TOPOLOGY(schq); + regval[k] = (child << 32) | (rr_prio << 1); + k++; + + /* Link configuration */ + if (!otx2_dev_is_sdp(dev) && + dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) { + reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, + otx2_nix_get_link(dev)); + regval[k] = BIT_ULL(12) | nix_get_relchan(dev); + k++; } - rc = send_tm_reqval(mbox, req); - if (rc) - goto error; break; case NIX_TXSCH_LVL_TL2: - req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); - req->lvl = hw_lvl; - req->num_regs = 0; - reg = req->reg; - regval = req->regval; - - *reg++ = NIX_AF_TL2X_PARENT(schq); - *regval++ = parent << 16; - req->num_regs++; - *reg++ = NIX_AF_TL2X_TOPOLOGY(schq); - *regval++ = (child << 32) | (rr_prio << 1); - req->num_regs++; - *reg++ = NIX_AF_TL2X_SCHEDULE(schq); - if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2) - *regval++ = (1 << 24) | rr_quantum; - else - *regval++ = (strict_schedul_prio << 24) | rr_quantum; - req->num_regs++; - *reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq, nix_get_link(dev)); - *regval++ = BIT_ULL(12) | nix_get_relchan(dev); - req->num_regs++; - if (pir.rate && pir.burst) { - *reg++ = NIX_AF_TL2X_PIR(schq); - *regval++ = shaper2regval(&pir) | 1; - req->num_regs++; - } - if (cir.rate && cir.burst) { - *reg++ = NIX_AF_TL2X_CIR(schq); - *regval++ = shaper2regval(&cir) | 1; - req->num_regs++; + /* Parent and schedule conf */ + reg[k] = NIX_AF_TL2X_PARENT(schq); + regval[k] = parent << 16; + k++; + + reg[k] = NIX_AF_TL2X_TOPOLOGY(schq); + regval[k] = (child << 32) | (rr_prio << 1); + k++; + + /* Link configuration */ + if (!otx2_dev_is_sdp(dev) && + dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) { + reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, + otx2_nix_get_link(dev)); + regval[k] = BIT_ULL(12) | nix_get_relchan(dev); + k++; } - rc = send_tm_reqval(mbox, req); - if (rc) - goto error; break; case NIX_TXSCH_LVL_TL1: - req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); - req->lvl = hw_lvl; - req->num_regs = 0; - reg = req->reg; - regval = req->regval; - - *reg++ = NIX_AF_TL1X_SCHEDULE(schq); - *regval++ = rr_quantum; - req->num_regs++; - *reg++ = NIX_AF_TL1X_TOPOLOGY(schq); - *regval++ = 
(child << 32) | (rr_prio << 1 /*RR_PRIO*/); - req->num_regs++; - if (cir.rate && cir.burst) { - *reg++ = NIX_AF_TL1X_CIR(schq); - *regval++ = shaper2regval(&cir) | 1; - req->num_regs++; - } + reg[k] = NIX_AF_TL1X_TOPOLOGY(schq); + regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/); + k++; - rc = send_tm_reqval(mbox, req); - if (rc) - goto error; break; } + /* Prepare schedule config */ + k += prepare_tm_sched_reg(dev, tm_node, ®[k], ®val[k]); + + /* Prepare shaping config */ + k += prepare_tm_shaper_reg(tm_node, profile, ®[k], ®val[k]); + + if (!k) + return 0; + + /* Copy and send config mbox */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = hw_lvl; + req->num_regs = k; + + otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k); + otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k); + otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k); + + rc = otx2_mbox_process(mbox); + if (rc) + goto error; + return 0; error: otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc); @@ -519,16 +690,14 @@ static int nix_tm_txsch_reg_config(struct otx2_eth_dev *dev) { struct otx2_nix_tm_node *tm_node; - uint32_t lvl; + uint32_t hw_lvl; int rc = 0; - if (nix_get_link(dev) == 13) - return -EPERM; - - for (lvl = 0; lvl < (uint32_t)dev->otx2_tm_root_lvl + 1; lvl++) { + for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) { TAILQ_FOREACH(tm_node, &dev->node_list, node) { - if (tm_node->hw_lvl_id == lvl) { - rc = populate_tm_registers(dev, tm_node); + if (tm_node->hw_lvl == hw_lvl && + tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) { + rc = populate_tm_reg(dev, tm_node); if (rc) goto exit; } @@ -618,16 +787,16 @@ nix_tm_update_parent_info(struct otx2_eth_dev *dev) static int nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id, uint32_t parent_node_id, uint32_t priority, - uint32_t weight, uint16_t hw_lvl_id, - uint16_t level_id, bool user, + uint32_t weight, uint16_t hw_lvl, + uint16_t lvl, bool user, struct rte_tm_node_params *params) { - struct otx2_nix_tm_shaper_profile *shaper_profile; + struct otx2_nix_tm_shaper_profile *profile; struct otx2_nix_tm_node *tm_node, *parent_node; - uint32_t shaper_profile_id; + uint32_t profile_id; - shaper_profile_id = params->shaper_profile_id; - shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id); + profile_id = params->shaper_profile_id; + profile = nix_tm_shaper_profile_search(dev, profile_id); parent_node = nix_tm_node_search(dev, parent_node_id, user); @@ -636,8 +805,12 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id, if (!tm_node) return -ENOMEM; - tm_node->level_id = level_id; - tm_node->hw_lvl_id = hw_lvl_id; + tm_node->lvl = lvl; + tm_node->hw_lvl = hw_lvl; + + /* Maintain minimum weight */ + if (!weight) + weight = 1; tm_node->id = node_id; tm_node->priority = priority; @@ -648,12 +821,23 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id, tm_node->flags = 0; if (user) tm_node->flags = NIX_TM_NODE_USER; + + /* Packet mode */ + if (!nix_tm_is_leaf(dev, lvl) && + ((profile && profile->params.packet_mode) || + (params->nonleaf.wfq_weight_mode && + params->nonleaf.n_sp_priorities && + !params->nonleaf.wfq_weight_mode[0]))) + tm_node->pkt_mode = 1; + rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params)); - if (shaper_profile) - shaper_profile->reference_count++; + if (profile) + profile->reference_count++; + tm_node->parent = parent_node; tm_node->parent_hw_id = UINT32_MAX; + shaper_default_red_algo(dev, tm_node, profile); 
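	/*
	 * Weight handling sketch: NIX_TM_WEIGHT_TO_RR_QUANTUM(), used in
	 * prepare_tm_sched_reg() above, is assumed to be equivalent to the
	 * open-coded scaling removed from populate_tm_registers():
	 *
	 *   rr_quantum = (weight * NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT;
	 *
	 * so the minimum weight of 1 enforced above still maps to a small
	 * but non-zero DWRR quantum for the node.
	 */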
TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node); @@ -677,24 +861,67 @@ nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev) } static int -nix_smq_xoff(struct otx2_eth_dev *dev, uint16_t smq, bool enable) +nix_clear_path_xoff(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node) +{ + struct nix_txschq_config *req; + struct otx2_nix_tm_node *p; + int rc; + + /* Manipulating SW_XOFF not supported on Ax */ + if (otx2_dev_is_Ax(dev)) + return 0; + + /* Enable nodes in path for flush to succeed */ + if (!nix_tm_is_leaf(dev, tm_node->lvl)) + p = tm_node; + else + p = tm_node->parent; + while (p) { + if (!(p->flags & NIX_TM_NODE_ENABLED) && + (p->flags & NIX_TM_NODE_HWRES)) { + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = p->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(p, false, req->reg, + req->regval); + rc = otx2_mbox_process(dev->mbox); + if (rc) + return rc; + + p->flags |= NIX_TM_NODE_ENABLED; + } + p = p->parent; + } + + return 0; +} + +static int +nix_smq_xoff(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + bool enable) { struct otx2_mbox *mbox = dev->mbox; struct nix_txschq_config *req; + uint16_t smq; + int rc; + + smq = tm_node->hw_id; + otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq, + enable ? "enable" : "disable"); + + rc = nix_clear_path_xoff(dev, tm_node); + if (rc) + return rc; req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); req->lvl = NIX_TXSCH_LVL_SMQ; req->num_regs = 1; req->reg[0] = NIX_AF_SMQX_CFG(smq); - /* Unmodified fields */ - req->regval[0] = ((uint64_t)NIX_MAX_VTAG_INS << 36) | - (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS; - - if (enable) - req->regval[0] |= BIT_ULL(50) | BIT_ULL(49); - else - req->regval[0] |= 0; + req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0; + req->regval_mask[0] = enable ? + ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50); return otx2_mbox_process(mbox); } @@ -710,6 +937,9 @@ otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable) uint64_t aura_handle; int rc; + otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq, + enable ? 
"enable" : "disable"); + lf = otx2_npa_lf_obj_get(); if (!lf) return -EFAULT; @@ -754,22 +984,41 @@ otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable) return 0; } -static void +static int nix_txq_flush_sq_spin(struct otx2_eth_txq *txq) { uint16_t sqb_cnt, head_off, tail_off; struct otx2_eth_dev *dev = txq->dev; + uint64_t wdata, val, prev; uint16_t sq = txq->sq; - uint64_t reg, val; int64_t *regaddr; + uint64_t timeout;/* 10's of usec */ + + /* Wait for enough time based on shaper min rate */ + timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5); + timeout = timeout / dev->tm_rate_min; + if (!timeout) + timeout = 10000; + + wdata = ((uint64_t)sq << 32); + regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS); + val = otx2_atomic64_add_nosync(wdata, regaddr); + + /* Spin multiple iterations as "txq->fc_cache_pkts" can still + * have space to send pkts even though fc_mem is disabled + */ while (true) { - reg = ((uint64_t)sq << 32); - regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS); - val = otx2_atomic64_add_nosync(reg, regaddr); + prev = val; + rte_delay_us(10); + val = otx2_atomic64_add_nosync(wdata, regaddr); + /* Continue on error */ + if (val & BIT_ULL(63)) + continue; + + if (prev != val) + continue; - regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS); - val = otx2_atomic64_add_nosync(reg, regaddr); sqb_cnt = val & 0xFFFF; head_off = (val >> 20) & 0x3F; tail_off = (val >> 28) & 0x3F; @@ -780,114 +1029,220 @@ nix_txq_flush_sq_spin(struct otx2_eth_txq *txq) break; } - rte_pause(); + /* Timeout */ + if (!timeout) + goto exit; + timeout--; } + + return 0; +exit: + otx2_nix_tm_dump(dev); + return -EFAULT; } -int -otx2_nix_tm_sw_xoff(void *__txq, bool dev_started) +/* Flush and disable tx queue and its parent SMQ */ +int otx2_nix_sq_flush_pre(void *_txq, bool dev_started) { - struct otx2_eth_txq *txq = __txq; - struct otx2_eth_dev *dev = txq->dev; - struct otx2_mbox *mbox = dev->mbox; - struct nix_aq_enq_req *req; - struct nix_aq_enq_rsp *rsp; - uint16_t smq; + struct otx2_nix_tm_node *tm_node, *sibling; + struct otx2_eth_txq *txq; + struct otx2_eth_dev *dev; + uint16_t sq; + bool user; int rc; - /* Get smq from sq */ - req = otx2_mbox_alloc_msg_nix_aq_enq(mbox); - req->qidx = txq->sq; - req->ctype = NIX_AQ_CTYPE_SQ; - req->op = NIX_AQ_INSTOP_READ; - rc = otx2_mbox_process_msg(mbox, (void *)&rsp); - if (rc) { - otx2_err("Failed to get smq, rc=%d", rc); - return -EIO; - } + txq = _txq; + dev = txq->dev; + sq = txq->sq; - /* Check if sq is enabled */ - if (!rsp->sq.ena) - return 0; + user = !!(dev->tm_flags & NIX_TM_COMMITTED); - smq = rsp->sq.smq; + /* Find the node for this SQ */ + tm_node = nix_tm_node_search(dev, sq, user); + if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) { + otx2_err("Invalid node/state for sq %u", sq); + return -EFAULT; + } /* Enable CGX RXTX to drain pkts */ if (!dev_started) { - rc = otx2_cgx_rxtx_start(dev); - if (rc) + /* Though it enables both RX MCAM Entries and CGX Link + * we assume all the rx queues are stopped way back. 
+ */ + otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox); + rc = otx2_mbox_process(dev->mbox); + if (rc) { + otx2_err("cgx start failed, rc=%d", rc); return rc; - } - - rc = otx2_nix_sq_sqb_aura_fc(txq, false); - if (rc < 0) { - otx2_err("Failed to disable sqb aura fc, rc=%d", rc); - goto cleanup; + } } /* Disable smq xoff for case it was enabled earlier */ - rc = nix_smq_xoff(dev, smq, false); + rc = nix_smq_xoff(dev, tm_node->parent, false); if (rc) { - otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc); - goto cleanup; + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->parent->hw_id, rc); + return rc; } - /* Wait for sq entries to be flushed */ - nix_txq_flush_sq_spin(txq); + /* As per HRM, to disable an SQ, all other SQ's + * that feed to same SMQ must be paused before SMQ flush. + */ + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + if (!(sibling->flags & NIX_TM_NODE_ENABLED)) + continue; - /* Flush and enable smq xoff */ - rc = nix_smq_xoff(dev, smq, true); - if (rc) { - otx2_err("Failed to disable smq for sq %u, rc=%d", txq->sq, rc); - return rc; + sq = sibling->id; + txq = dev->eth_dev->data->tx_queues[sq]; + if (!txq) + continue; + + rc = otx2_nix_sq_sqb_aura_fc(txq, false); + if (rc) { + otx2_err("Failed to disable sqb aura fc, rc=%d", rc); + goto cleanup; + } + + /* Wait for sq entries to be flushed */ + rc = nix_txq_flush_sq_spin(txq); + if (rc) { + otx2_err("Failed to drain sq %u, rc=%d\n", txq->sq, rc); + return rc; + } } + tm_node->flags &= ~NIX_TM_NODE_ENABLED; + + /* Disable and flush */ + rc = nix_smq_xoff(dev, tm_node->parent, true); + if (rc) { + otx2_err("Failed to disable smq %u, rc=%d", + tm_node->parent->hw_id, rc); + goto cleanup; + } cleanup: /* Restore cgx state */ - if (!dev_started) - rc |= otx2_cgx_rxtx_stop(dev); + if (!dev_started) { + otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox); + rc |= otx2_mbox_process(dev->mbox); + } return rc; } -static int -nix_tm_sw_xon(struct otx2_eth_txq *txq, - uint16_t smq, uint32_t rr_quantum) +int otx2_nix_sq_flush_post(void *_txq) { - struct otx2_eth_dev *dev = txq->dev; - struct otx2_mbox *mbox = dev->mbox; - struct nix_aq_enq_req *req; + struct otx2_nix_tm_node *tm_node, *sibling; + struct otx2_eth_txq *txq = _txq; + struct otx2_eth_txq *s_txq; + struct otx2_eth_dev *dev; + bool once = false; + uint16_t sq, s_sq; + bool user; int rc; - otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum %u", - txq->sq, txq->sq, rr_quantum); - /* Set smq from sq */ - req = otx2_mbox_alloc_msg_nix_aq_enq(mbox); - req->qidx = txq->sq; + dev = txq->dev; + sq = txq->sq; + user = !!(dev->tm_flags & NIX_TM_COMMITTED); + + /* Find the node for this SQ */ + tm_node = nix_tm_node_search(dev, sq, user); + if (!tm_node) { + otx2_err("Invalid node for sq %u", sq); + return -EFAULT; + } + + /* Enable all the siblings back */ + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + + if (sibling->id == sq) + continue; + + if (!(sibling->flags & NIX_TM_NODE_ENABLED)) + continue; + + s_sq = sibling->id; + s_txq = dev->eth_dev->data->tx_queues[s_sq]; + if (!s_txq) + continue; + + if (!once) { + /* Enable back if any SQ is still present */ + rc = nix_smq_xoff(dev, tm_node->parent, false); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->parent->hw_id, rc); + return rc; + } + once = true; + } + + rc = otx2_nix_sq_sqb_aura_fc(s_txq, true); + if (rc) { + otx2_err("Failed to enable sqb aura fc, rc=%d", rc); + return rc; + } + } + 
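	/*
	 * Pairing sketch (assumed call order around SQ teardown/reconfig,
	 * not spelled out in this file): otx2_nix_sq_flush_pre() pauses
	 * every sibling SQ feeding the same SMQ, drains them and then
	 * XOFF+flushes the parent SMQ; once the SQ context has been
	 * updated, this function XONs the SMQ again and restores SQB aura
	 * flow control on the remaining siblings:
	 *
	 *   otx2_nix_sq_flush_pre(txq, dev_started);
	 *   ...update or free the SQ context...
	 *   otx2_nix_sq_flush_post(txq);
	 */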
+ return 0; +} + +static int +nix_sq_sched_data(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + bool rr_quantum_only) +{ + struct rte_eth_dev *eth_dev = dev->eth_dev; + struct otx2_mbox *mbox = dev->mbox; + uint16_t sq = tm_node->id, smq; + struct nix_aq_enq_req *req; + uint64_t rr_quantum; + int rc; + + smq = tm_node->parent->hw_id; + rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight); + + if (rr_quantum_only) + otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64, sq, rr_quantum); + else + otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64, + sq, smq, rr_quantum); + + if (sq > eth_dev->data->nb_tx_queues) + return -EFAULT; + + req = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + req->qidx = sq; req->ctype = NIX_AQ_CTYPE_SQ; req->op = NIX_AQ_INSTOP_WRITE; - req->sq.smq = smq; + + /* smq update only when needed */ + if (!rr_quantum_only) { + req->sq.smq = smq; + req->sq_mask.smq = ~req->sq_mask.smq; + } req->sq.smq_rr_quantum = rr_quantum; - req->sq_mask.smq = ~req->sq_mask.smq; req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum; rc = otx2_mbox_process(mbox); - if (rc) { + if (rc) otx2_err("Failed to set smq, rc=%d", rc); - return -EIO; - } + return rc; +} + +int otx2_nix_sq_enable(void *_txq) +{ + struct otx2_eth_txq *txq = _txq; + int rc; /* Enable sqb_aura fc */ rc = otx2_nix_sq_sqb_aura_fc(txq, true); - if (rc < 0) { - otx2_err("Failed to enable sqb aura fc, rc=%d", rc); - return rc; - } - - /* Disable smq xoff */ - rc = nix_smq_xoff(dev, smq, false); if (rc) { - otx2_err("Failed to enable smq for sq %u", txq->sq); + otx2_err("Failed to enable sqb aura fc, rc=%d", rc); return rc; } @@ -898,12 +1253,11 @@ static int nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask, uint32_t flags, bool hw_only) { - struct otx2_nix_tm_shaper_profile *shaper_profile; + struct otx2_nix_tm_shaper_profile *profile; struct otx2_nix_tm_node *tm_node, *next_node; struct otx2_mbox *mbox = dev->mbox; struct nix_txsch_free_req *req; - uint32_t shaper_profile_id; - bool skip_node = false; + uint32_t profile_id; int rc = 0; next_node = TAILQ_FIRST(&dev->node_list); @@ -915,37 +1269,40 @@ nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask, if ((tm_node->flags & flags_mask) != flags) continue; - if (nix_tm_have_tl1_access(dev) && - tm_node->hw_lvl_id == NIX_TXSCH_LVL_TL1) - skip_node = true; - - otx2_tm_dbg("Free hwres for node %u, hwlvl %u, hw_id %u (%p)", - tm_node->id, tm_node->hw_lvl_id, - tm_node->hw_id, tm_node); - /* Free specific HW resource if requested */ - if (!skip_node && flags_mask && + if (!nix_tm_is_leaf(dev, tm_node->lvl) && + tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 && tm_node->flags & NIX_TM_NODE_HWRES) { + /* Free specific HW resource */ + otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)", + nix_hwlvl2str(tm_node->hw_lvl), + tm_node->hw_id, tm_node->lvl, + tm_node->id, tm_node); + + rc = nix_clear_path_xoff(dev, tm_node); + if (rc) + return rc; + req = otx2_mbox_alloc_msg_nix_txsch_free(mbox); req->flags = 0; - req->schq_lvl = tm_node->hw_lvl_id; + req->schq_lvl = tm_node->hw_lvl; req->schq = tm_node->hw_id; rc = otx2_mbox_process(mbox); if (rc) - break; - } else { - skip_node = false; + return rc; + tm_node->flags &= ~NIX_TM_NODE_HWRES; } - tm_node->flags &= ~NIX_TM_NODE_HWRES; /* Leave software elements if needed */ if (hw_only) continue; - shaper_profile_id = tm_node->params.shaper_profile_id; - shaper_profile = - nix_tm_shaper_profile_search(dev, shaper_profile_id); - if (shaper_profile) - shaper_profile->reference_count--; + 
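		/*
		 * Filter sketch: the (flags_mask, flags) pair checked at the
		 * top of this loop selects which nodes are touched, e.g.
		 *
		 *   nix_tm_free_resources(dev, 0, 0, false);
		 *
		 * matches every node (as done when rebuilding the default
		 * tree), while a hypothetical
		 *
		 *   nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
		 *
		 * would match only nodes created without NIX_TM_NODE_USER set.
		 */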
otx2_tm_dbg("Free node lvl %u id %u (%p)", + tm_node->lvl, tm_node->id, tm_node); + + profile_id = tm_node->params.shaper_profile_id; + profile = nix_tm_shaper_profile_search(dev, profile_id); + if (profile) + profile->reference_count--; TAILQ_REMOVE(&dev->node_list, tm_node, node); rte_free(tm_node); @@ -990,18 +1347,18 @@ nix_tm_assign_id_to_node(struct otx2_eth_dev *dev, uint32_t hw_id, schq_con_index, prio_offset; uint32_t l_id, schq_index; - otx2_tm_dbg("Assign hw id for child node %u, lvl %u, hw_lvl %u (%p)", - child->id, child->level_id, child->hw_lvl_id, child); + otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)", + nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child); child->flags |= NIX_TM_NODE_HWRES; /* Process root nodes */ if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 && - child->hw_lvl_id == dev->otx2_tm_root_lvl && !parent) { + child->hw_lvl == dev->otx2_tm_root_lvl && !parent) { int idx = 0; uint32_t tschq_con_index; - l_id = child->hw_lvl_id; + l_id = child->hw_lvl; tschq_con_index = dev->txschq_contig_index[l_id]; hw_id = dev->txschq_contig_list[l_id][tschq_con_index]; child->hw_id = hw_id; @@ -1013,10 +1370,10 @@ nix_tm_assign_id_to_node(struct otx2_eth_dev *dev, return 0; } if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 && - child->hw_lvl_id == dev->otx2_tm_root_lvl && !parent) { + child->hw_lvl == dev->otx2_tm_root_lvl && !parent) { uint32_t tschq_con_index; - l_id = child->hw_lvl_id; + l_id = child->hw_lvl; tschq_con_index = dev->txschq_index[l_id]; hw_id = dev->txschq_list[l_id][tschq_con_index]; child->hw_id = hw_id; @@ -1025,7 +1382,7 @@ nix_tm_assign_id_to_node(struct otx2_eth_dev *dev, } /* Process children with parents */ - l_id = child->hw_lvl_id; + l_id = child->hw_lvl; schq_index = dev->txschq_index[l_id]; schq_con_index = dev->txschq_contig_index[l_id]; @@ -1050,8 +1407,8 @@ nix_tm_assign_hw_id(struct otx2_eth_dev *dev) for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) { TAILQ_FOREACH(parent, &dev->node_list, node) { - child_hw_lvl = parent->hw_lvl_id - 1; - if (parent->hw_lvl_id != i) + child_hw_lvl = parent->hw_lvl - 1; + if (parent->hw_lvl != i) continue; TAILQ_FOREACH(child, &dev->node_list, node) { if (!child->parent) @@ -1068,7 +1425,7 @@ nix_tm_assign_hw_id(struct otx2_eth_dev *dev) * Explicitly assign id to parent node if it * doesn't have a parent */ - if (parent->hw_lvl_id == dev->otx2_tm_root_lvl) + if (parent->hw_lvl == dev->otx2_tm_root_lvl) nix_tm_assign_id_to_node(dev, parent, NULL); } } @@ -1083,7 +1440,7 @@ nix_tm_count_req_schq(struct otx2_eth_dev *dev, uint8_t contig_count; TAILQ_FOREACH(tm_node, &dev->node_list, node) { - if (lvl == tm_node->hw_lvl_id) { + if (lvl == tm_node->hw_lvl) { req->schq[lvl - 1] += tm_node->rr_num; if (tm_node->max_prio != UINT32_MAX) { contig_count = tm_node->max_prio + 1; @@ -1092,7 +1449,7 @@ nix_tm_count_req_schq(struct otx2_eth_dev *dev, } if (lvl == dev->otx2_tm_root_lvl && dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 && - tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) { + tm_node->hw_lvl == dev->otx2_tm_root_lvl) { req->schq_contig[dev->otx2_tm_root_lvl]++; } } @@ -1138,6 +1495,7 @@ nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev) return rc; nix_tm_copy_rsp_to_dev(dev, rsp); + dev->link_cfg_lvl = rsp->link_cfg_lvl; nix_tm_assign_hw_id(dev); return 0; @@ -1148,8 +1506,8 @@ nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable) { struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); struct otx2_nix_tm_node *tm_node; - uint16_t sq, smq, rr_quantum; struct otx2_eth_txq *txq; 
+ uint16_t sq; int rc; nix_tm_update_parent_info(dev); @@ -1166,175 +1524,1733 @@ nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable) return rc; } - /* Enable xmit as all the topology is ready */ - TAILQ_FOREACH(tm_node, &dev->node_list, node) { - if (tm_node->flags & NIX_TM_NODE_ENABLED) - continue; + /* Trigger MTU recalculate as SMQ needs MTU conf */ + if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) { + rc = otx2_nix_recalc_mtu(eth_dev); + if (rc) { + otx2_err("TM MTU update failed, rc=%d", rc); + return rc; + } + } - /* Enable xmit on sq */ - if (tm_node->level_id != OTX2_TM_LVL_QUEUE) { + /* Mark all non-leaf's as enabled */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!nix_tm_is_leaf(dev, tm_node->lvl)) tm_node->flags |= NIX_TM_NODE_ENABLED; + } + + if (!xmit_enable) + return 0; + + /* Update SQ Sched Data while SQ is idle */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!nix_tm_is_leaf(dev, tm_node->lvl)) continue; + + rc = nix_sq_sched_data(dev, tm_node, false); + if (rc) { + otx2_err("SQ %u sched update failed, rc=%d", + tm_node->id, rc); + return rc; } + } - /* Don't enable SMQ or mark as enable */ - if (!xmit_enable) + /* Finally XON all SMQ's */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ) continue; - sq = tm_node->id; - if (sq > eth_dev->data->nb_tx_queues) { - rc = -EFAULT; - break; + rc = nix_smq_xoff(dev, tm_node, false); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->hw_id, rc); + return rc; } + } - txq = eth_dev->data->tx_queues[sq]; + /* Enable xmit as all the topology is ready */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!nix_tm_is_leaf(dev, tm_node->lvl)) + continue; - smq = tm_node->parent->hw_id; - rr_quantum = (tm_node->weight * - NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT; + sq = tm_node->id; + txq = eth_dev->data->tx_queues[sq]; - rc = nix_tm_sw_xon(txq, smq, rr_quantum); - if (rc) - break; + rc = otx2_nix_sq_enable(txq); + if (rc) { + otx2_err("TM sw xon failed on SQ %u, rc=%d", + tm_node->id, rc); + return rc; + } tm_node->flags |= NIX_TM_NODE_ENABLED; } - if (rc) - otx2_err("TM failed to enable xmit on sq %u, rc=%d", sq, rc); - - return rc; + return 0; } static int -nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev) +send_tm_reqval(struct otx2_mbox *mbox, + struct nix_txschq_config *req, + struct rte_tm_error *error) { - struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); - uint32_t def = eth_dev->data->nb_tx_queues; - struct rte_tm_node_params params; - uint32_t leaf_parent, i; - int rc = 0; + int rc; - /* Default params */ - memset(¶ms, 0, sizeof(params)); - params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + if (!req->num_regs || + req->num_regs > MAX_REGS_PER_MBOX_MSG) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "invalid config"; + return -EIO; + } + rc = otx2_mbox_process(mbox); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + } + return rc; +} + +static uint16_t +nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl) +{ if (nix_tm_have_tl1_access(dev)) { - dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1; - rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_TL1, - OTX2_TM_LVL_ROOT, false, ¶ms); - if (rc) - goto exit; - rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_TL2, - OTX2_TM_LVL_SCH1, false, ¶ms); - if (rc) - goto exit; + switch (lvl) { + 
case OTX2_TM_LVL_ROOT: + return NIX_TXSCH_LVL_TL1; + case OTX2_TM_LVL_SCH1: + return NIX_TXSCH_LVL_TL2; + case OTX2_TM_LVL_SCH2: + return NIX_TXSCH_LVL_TL3; + case OTX2_TM_LVL_SCH3: + return NIX_TXSCH_LVL_TL4; + case OTX2_TM_LVL_SCH4: + return NIX_TXSCH_LVL_SMQ; + default: + return NIX_TXSCH_LVL_CNT; + } + } else { + switch (lvl) { + case OTX2_TM_LVL_ROOT: + return NIX_TXSCH_LVL_TL2; + case OTX2_TM_LVL_SCH1: + return NIX_TXSCH_LVL_TL3; + case OTX2_TM_LVL_SCH2: + return NIX_TXSCH_LVL_TL4; + case OTX2_TM_LVL_SCH3: + return NIX_TXSCH_LVL_SMQ; + default: + return NIX_TXSCH_LVL_CNT; + } + } +} - rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_TL3, - OTX2_TM_LVL_SCH2, false, ¶ms); - if (rc) - goto exit; +static uint16_t +nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl) +{ + if (hw_lvl >= NIX_TXSCH_LVL_CNT) + return 0; - rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_TL4, - OTX2_TM_LVL_SCH3, false, ¶ms); - if (rc) - goto exit; + /* MDQ doesn't support SP */ + if (hw_lvl == NIX_TXSCH_LVL_MDQ) + return 0; - rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_SMQ, - OTX2_TM_LVL_SCH4, false, ¶ms); - if (rc) - goto exit; + /* PF's TL1 with VF's enabled doesn't support SP */ + if (hw_lvl == NIX_TXSCH_LVL_TL1 && + (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 || + (dev->tm_flags & NIX_TM_TL1_NO_SP))) + return 0; - leaf_parent = def + 4; - } else { - dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2; - rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_TL2, - OTX2_TM_LVL_ROOT, false, ¶ms); - if (rc) - goto exit; + return TXSCH_TLX_SP_PRIO_MAX - 1; +} - rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_TL3, - OTX2_TM_LVL_SCH1, false, ¶ms); - if (rc) - goto exit; - rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_TL4, - OTX2_TM_LVL_SCH2, false, ¶ms); - if (rc) - goto exit; +static int +validate_prio(struct otx2_eth_dev *dev, uint32_t lvl, + uint32_t parent_id, uint32_t priority, + struct rte_tm_error *error) +{ + uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX]; + struct otx2_nix_tm_node *tm_node; + uint32_t rr_num = 0; + int i; - rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_SMQ, - OTX2_TM_LVL_SCH3, false, ¶ms); - if (rc) - goto exit; + /* Validate priority against max */ + if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) { + error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; + error->message = "unsupported priority value"; + return -EINVAL; + } - leaf_parent = def + 3; + if (parent_id == RTE_TM_NODE_ID_NULL) + return 0; + + memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX); + priorities[priority] = 1; + + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!tm_node->parent) + continue; + + if (!(tm_node->flags & NIX_TM_NODE_USER)) + continue; + + if (tm_node->parent->id != parent_id) + continue; + + priorities[tm_node->priority]++; } - /* Add leaf nodes */ - for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { - rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0, - DEFAULT_RR_WEIGHT, - NIX_TXSCH_LVL_CNT, - OTX2_TM_LVL_QUEUE, false, ¶ms); - if (rc) - break; + for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++) + if (priorities[i] > 1) + rr_num++; + + /* At max, one rr groups per parent */ + if (rr_num > 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; + error->message = "multiple DWRR node priority"; + return 
-EINVAL; } -exit: - return rc; + /* Check for previous priority to avoid holes in priorities */ + if (priority && !priorities[priority - 1]) { + error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; + error->message = "priority not in order"; + return -EINVAL; + } + + return 0; } -void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev) +static int +read_tm_reg(struct otx2_mbox *mbox, uint64_t reg, + uint64_t *regval, uint32_t hw_lvl) { - struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + volatile struct nix_txschq_config *req; + struct nix_txschq_config *rsp; + int rc; - TAILQ_INIT(&dev->node_list); - TAILQ_INIT(&dev->shaper_profile_list); + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->read = 1; + req->lvl = hw_lvl; + req->reg[0] = reg; + req->num_regs = 1; + + rc = otx2_mbox_process_msg(mbox, (void **)&rsp); + if (rc) + return rc; + *regval = rsp->regval[0]; + return 0; } -int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev) +/* Search for min rate in topology */ +static void +nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev) { - struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); - uint16_t sq_cnt = eth_dev->data->nb_tx_queues; - int rc; + struct otx2_nix_tm_shaper_profile *profile; + uint64_t rate_min = 1E9; /* 1 Gbps */ - /* Free up all resources already held */ - rc = nix_tm_free_resources(dev, 0, 0, false); - if (rc) { - otx2_err("Failed to freeup existing resources,rc=%d", rc); - return rc; + TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) { + if (profile->params.peak.rate && + profile->params.peak.rate < rate_min) + rate_min = profile->params.peak.rate; + + if (profile->params.committed.rate && + profile->params.committed.rate < rate_min) + rate_min = profile->params.committed.rate; } - /* Clear shaper profiles */ - nix_tm_clear_shaper_profiles(dev); - dev->tm_flags = NIX_TM_DEFAULT_TREE; + dev->tm_rate_min = rate_min; +} - rc = nix_tm_prepare_default_tree(eth_dev); - if (rc != 0) - return rc; +static int +nix_xmit_disable(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t sq_cnt = eth_dev->data->nb_tx_queues; + uint16_t sqb_cnt, head_off, tail_off; + struct otx2_nix_tm_node *tm_node; + struct otx2_eth_txq *txq; + uint64_t wdata, val; + int i, rc; - rc = nix_tm_alloc_resources(eth_dev, false); - if (rc != 0) - return rc; - dev->tm_leaf_cnt = sq_cnt; + otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name); + + /* Enable CGX RXTX to drain pkts */ + if (!eth_dev->data->dev_started) { + otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox); + rc = otx2_mbox_process(dev->mbox); + if (rc) + return rc; + } + + /* XON all SMQ's */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ) + continue; + if (!(tm_node->flags & NIX_TM_NODE_HWRES)) + continue; + + rc = nix_smq_xoff(dev, tm_node, false); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->hw_id, rc); + goto cleanup; + } + } + + /* Flush all tx queues */ + for (i = 0; i < sq_cnt; i++) { + txq = eth_dev->data->tx_queues[i]; + + rc = otx2_nix_sq_sqb_aura_fc(txq, false); + if (rc) { + otx2_err("Failed to disable sqb aura fc, rc=%d", rc); + goto cleanup; + } + + /* Wait for sq entries to be flushed */ + rc = nix_txq_flush_sq_spin(txq); + if (rc) { + otx2_err("Failed to drain sq, rc=%d\n", rc); + goto cleanup; + } + } + + /* XOFF & Flush all SMQ's. HRM mandates + * all SQ's empty before SMQ flush is issued. 
+ */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ) + continue; + if (!(tm_node->flags & NIX_TM_NODE_HWRES)) + continue; + + rc = nix_smq_xoff(dev, tm_node, true); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->hw_id, rc); + goto cleanup; + } + } + + /* Verify sanity of all tx queues */ + for (i = 0; i < sq_cnt; i++) { + txq = eth_dev->data->tx_queues[i]; + + wdata = ((uint64_t)txq->sq << 32); + val = otx2_atomic64_add_nosync(wdata, + (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS)); + + sqb_cnt = val & 0xFFFF; + head_off = (val >> 20) & 0x3F; + tail_off = (val >> 28) & 0x3F; + + if (sqb_cnt > 1 || head_off != tail_off || + (*txq->fc_mem != txq->nb_sqb_bufs)) + otx2_err("Failed to gracefully flush sq %u", txq->sq); + } + +cleanup: + /* restore cgx state */ + if (!eth_dev->data->dev_started) { + otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox); + rc |= otx2_mbox_process(dev->mbox); + } + + return rc; +} + +static int +otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + + if (is_leaf == NULL) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + return -EINVAL; + } + + tm_node = nix_tm_node_search(dev, node_id, true); + if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + return -EINVAL; + } + if (nix_tm_is_leaf(dev, tm_node->lvl)) + *is_leaf = true; + else + *is_leaf = false; + return 0; +} + +static int +otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + int rc, max_nr_nodes = 0, i; + struct free_rsrcs_rsp *rsp; + + memset(cap, 0, sizeof(*cap)); + + otx2_mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + return rc; + } + + for (i = 0; i < NIX_TXSCH_LVL_TL1; i++) + max_nr_nodes += rsp->schq[i]; + + cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt; + /* TL1 level is reserved for PF */ + cap->n_levels_max = nix_tm_have_tl1_access(dev) ? 
+ OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1; + cap->non_leaf_nodes_identical = 1; + cap->leaf_nodes_identical = 1; + + /* Shaper Capabilities */ + cap->shaper_private_n_max = max_nr_nodes; + cap->shaper_n_max = max_nr_nodes; + cap->shaper_private_dual_rate_n_max = max_nr_nodes; + cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8; + cap->shaper_private_packet_mode_supported = 1; + cap->shaper_private_byte_mode_supported = 1; + cap->shaper_pkt_length_adjust_min = NIX_LENGTH_ADJUST_MIN; + cap->shaper_pkt_length_adjust_max = NIX_LENGTH_ADJUST_MAX; + + /* Schedule Capabilities */ + cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ]; + cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX; + cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max; + cap->sched_wfq_n_groups_max = 1; + cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT; + cap->sched_wfq_packet_mode_supported = 1; + cap->sched_wfq_byte_mode_supported = 1; + + cap->dynamic_update_mask = + RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL | + RTE_TM_UPDATE_NODE_SUSPEND_RESUME; + cap->stats_mask = + RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES | + RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + + for (i = 0; i < RTE_COLORS; i++) { + cap->mark_vlan_dei_supported[i] = false; + cap->mark_ip_ecn_tcp_supported[i] = false; + cap->mark_ip_dscp_supported[i] = false; + } + + return 0; +} + +static int +otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct free_rsrcs_rsp *rsp; + uint16_t hw_lvl; + int rc; + + memset(cap, 0, sizeof(*cap)); + + otx2_mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + return rc; + } + + hw_lvl = nix_tm_lvl2nix(dev, lvl); + + if (nix_tm_is_leaf(dev, lvl)) { + /* Leaf */ + cap->n_nodes_max = dev->tm_leaf_cnt; + cap->n_nodes_leaf_max = dev->tm_leaf_cnt; + cap->leaf_nodes_identical = 1; + cap->leaf.stats_mask = + RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES; + + } else if (lvl == OTX2_TM_LVL_ROOT) { + /* Root node, aka TL2(vf)/TL1(pf) */ + cap->n_nodes_max = 1; + cap->n_nodes_nonleaf_max = 1; + cap->non_leaf_nodes_identical = 1; + + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = + nix_tm_have_tl1_access(dev) ? 
false : true; + cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8; + cap->nonleaf.shaper_private_packet_mode_supported = 1; + cap->nonleaf.shaper_private_byte_mode_supported = 1; + + cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1]; + cap->nonleaf.sched_sp_n_priorities_max = + nix_max_prio(dev, hw_lvl) + 1; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT; + cap->nonleaf.sched_wfq_packet_mode_supported = 1; + cap->nonleaf.sched_wfq_byte_mode_supported = 1; + + if (nix_tm_have_tl1_access(dev)) + cap->nonleaf.stats_mask = + RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + } else if ((lvl < OTX2_TM_LVL_MAX) && + (hw_lvl < NIX_TXSCH_LVL_CNT)) { + /* TL2, TL3, TL4, MDQ */ + cap->n_nodes_max = rsp->schq[hw_lvl]; + cap->n_nodes_nonleaf_max = cap->n_nodes_max; + cap->non_leaf_nodes_identical = 1; + + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = true; + cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8; + cap->nonleaf.shaper_private_packet_mode_supported = 1; + cap->nonleaf.shaper_private_byte_mode_supported = 1; + + /* MDQ doesn't support Strict Priority */ + if (hw_lvl == NIX_TXSCH_LVL_MDQ) + cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt; + else + cap->nonleaf.sched_n_children_max = + rsp->schq[hw_lvl - 1]; + cap->nonleaf.sched_sp_n_priorities_max = + nix_max_prio(dev, hw_lvl) + 1; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT; + cap->nonleaf.sched_wfq_packet_mode_supported = 1; + cap->nonleaf.sched_wfq_byte_mode_supported = 1; + } else { + /* unsupported level */ + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + return rc; + } + return 0; +} + +static int +otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct free_rsrcs_rsp *rsp; + int rc, hw_lvl, lvl; + + memset(cap, 0, sizeof(*cap)); + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + hw_lvl = tm_node->hw_lvl; + lvl = tm_node->lvl; + + /* Leaf node */ + if (nix_tm_is_leaf(dev, lvl)) { + cap->stats_mask = RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES; + return 0; + } + + otx2_mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + return rc; + } + + /* Non Leaf Shaper */ + cap->shaper_private_supported = true; + cap->shaper_private_dual_rate_supported = + (hw_lvl == NIX_TXSCH_LVL_TL1) ? 
false : true;
+	cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+	cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+	cap->shaper_private_packet_mode_supported = 1;
+	cap->shaper_private_byte_mode_supported = 1;
+
+	/* Non Leaf Scheduler */
+	if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+		cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+	else
+		cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+
+	cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
+	cap->nonleaf.sched_wfq_n_children_per_group_max =
+		cap->nonleaf.sched_n_children_max;
+	cap->nonleaf.sched_wfq_n_groups_max = 1;
+	cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+	cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+	cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+	if (hw_lvl == NIX_TXSCH_LVL_TL1)
+		cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+			RTE_TM_STATS_N_BYTES_RED_DROPPED;
+	return 0;
+}
+
+static int
+otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
+			       uint32_t profile_id,
+			       struct rte_tm_shaper_params *params,
+			       struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_shaper_profile *profile;
+
+	profile = nix_tm_shaper_profile_search(dev, profile_id);
+	if (profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "shaper profile ID already exists";
+		return -EINVAL;
+	}
+
+	/* Committed rate and burst size can be enabled/disabled */
+	if (params->committed.size || params->committed.rate) {
+		if (params->committed.size < MIN_SHAPER_BURST ||
+		    params->committed.size > MAX_SHAPER_BURST) {
+			error->type =
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+			return -EINVAL;
+		} else if (!shaper_rate_to_nix(params->committed.rate * 8,
+					       NULL, NULL, NULL)) {
+			error->type =
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+			error->message = "shaper committed rate invalid";
+			return -EINVAL;
+		}
+	}
+
+	/* Peak rate and burst size can be enabled/disabled */
+	if (params->peak.size || params->peak.rate) {
+		if (params->peak.size < MIN_SHAPER_BURST ||
+		    params->peak.size > MAX_SHAPER_BURST) {
+			error->type =
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+			return -EINVAL;
+		} else if (!shaper_rate_to_nix(params->peak.rate * 8,
+					       NULL, NULL, NULL)) {
+			error->type =
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+			error->message = "shaper peak rate invalid";
+			return -EINVAL;
+		}
+	}
+
+	if (params->pkt_length_adjust < NIX_LENGTH_ADJUST_MIN ||
+	    params->pkt_length_adjust > NIX_LENGTH_ADJUST_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "length adjust invalid";
+		return -EINVAL;
+	}
+
+	profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
+			      sizeof(struct otx2_nix_tm_shaper_profile), 0);
+	if (!profile)
+		return -ENOMEM;
+
+	profile->shaper_profile_id = profile_id;
+	rte_memcpy(&profile->params, params,
+		   sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
+
+	otx2_tm_dbg("Added TM shaper profile %u, "
+		    " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
+		    ", cbs %" PRIu64 " , adj %u, pkt mode %d",
+		    profile_id,
+		    params->peak.rate * 8,
+		    params->peak.size,
+		    params->committed.rate * 8,
+		    params->committed.size,
+		    params->pkt_length_adjust,
+		    params->packet_mode);
+
+	/* Translate rates to bits per second */
+	profile->params.peak.rate = profile->params.peak.rate * 8;
+	profile->params.committed.rate = profile->params.committed.rate * 8;
+	/* Always use PIR for single rate shaping */
+	if (!params->peak.rate &&
params->committed.rate) {
+		profile->params.peak = profile->params.committed;
+		memset(&profile->params.committed, 0,
+		       sizeof(profile->params.committed));
+	}
+
+	/* update min rate */
+	nix_tm_shaper_profile_update_min(dev);
+	return 0;
+}
+
+static int
+otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
+				  uint32_t profile_id,
+				  struct rte_tm_error *error)
+{
+	struct otx2_nix_tm_shaper_profile *profile;
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+	profile = nix_tm_shaper_profile_search(dev, profile_id);
+
+	if (!profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "shaper profile ID does not exist";
+		return -EINVAL;
+	}
+
+	if (profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "shaper profile in use";
+		return -EINVAL;
+	}
+
+	otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
+	TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
+	rte_free(profile);
+
+	/* update min rate */
+	nix_tm_shaper_profile_update_min(dev);
+	return 0;
+}
+
+static int
+otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
+		     uint32_t parent_node_id, uint32_t priority,
+		     uint32_t weight, uint32_t lvl,
+		     struct rte_tm_node_params *params,
+		     struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_shaper_profile *profile = NULL;
+	struct otx2_nix_tm_node *parent_node;
+	int rc, pkt_mode, clear_on_fail = 0;
+	uint32_t exp_next_lvl, i;
+	uint32_t profile_id;
+	uint16_t hw_lvl;
+
+	/* we don't support dynamic updates */
+	if (dev->tm_flags & NIX_TM_COMMITTED) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "dynamic update not supported";
+		return -EIO;
+	}
+
+	/* Leaf nodes must all have the same priority */
+	if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "queue shapers must be priority 0";
+		return -EIO;
+	}
+
+	parent_node = nix_tm_node_search(dev, parent_node_id, true);
+
+	/* find the right level */
+	if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
+		if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+			lvl = OTX2_TM_LVL_ROOT;
+		} else if (parent_node) {
+			lvl = parent_node->lvl + 1;
+		} else {
+			/* Neither a proper parent nor a proper level id given */
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "invalid parent node id";
+			return -ERANGE;
+		}
+	}
+
+	/* Translate rte_tm level ids to nix hw level ids */
+	hw_lvl = nix_tm_lvl2nix(dev, lvl);
+	if (hw_lvl == NIX_TXSCH_LVL_CNT &&
+	    !nix_tm_is_leaf(dev, lvl)) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "invalid level id";
+		return -ERANGE;
+	}
+
+	if (node_id < dev->tm_leaf_cnt)
+		exp_next_lvl = NIX_TXSCH_LVL_SMQ;
+	else
+		exp_next_lvl = hw_lvl + 1;
+
+	/* Check if there is no parent node yet */
+	if (hw_lvl != dev->otx2_tm_root_lvl &&
+	    (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "invalid parent node id";
+		return -EINVAL;
+	}
+
+	/* Check if a node already exists */
+	if (nix_tm_node_search(dev, node_id, true)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node already exists";
+		return -EINVAL;
+	}
+
+	if (!nix_tm_is_leaf(dev, lvl)) {
+		/* Check if shaper profile exists for non leaf node */
+		profile_id = params->shaper_profile_id;
+		profile = nix_tm_shaper_profile_search(dev, profile_id);
+		if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && !profile) {
+			error->type =
RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "invalid shaper profile"; + return -EINVAL; + } + + /* Minimum static priority count is 1 */ + if (!params->nonleaf.n_sp_priorities || + params->nonleaf.n_sp_priorities > TXSCH_TLX_SP_PRIO_MAX) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES; + error->message = "invalid sp priorities"; + return -EINVAL; + } + + pkt_mode = 0; + /* Validate weight mode */ + for (i = 0; i < params->nonleaf.n_sp_priorities && + params->nonleaf.wfq_weight_mode; i++) { + pkt_mode = !params->nonleaf.wfq_weight_mode[i]; + if (pkt_mode == !params->nonleaf.wfq_weight_mode[0]) + continue; + + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "unsupported weight mode"; + return -EINVAL; + } + + if (profile && params->nonleaf.n_sp_priorities && + pkt_mode != profile->params.packet_mode) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "shaper wfq packet mode mismatch"; + return -EINVAL; + } + } + + /* Check if there is second DWRR already in siblings or holes in prio */ + if (validate_prio(dev, lvl, parent_node_id, priority, error)) + return -EINVAL; + + if (weight > MAX_SCHED_WEIGHT) { + error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT; + error->message = "max weight exceeded"; + return -EINVAL; + } + + rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id, + priority, weight, hw_lvl, + lvl, true, params); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + /* cleanup user added nodes */ + if (clear_on_fail) + nix_tm_free_resources(dev, NIX_TM_NODE_USER, + NIX_TM_NODE_USER, false); + error->message = "failed to add node"; + return rc; + } + error->type = RTE_TM_ERROR_TYPE_NONE; + return 0; +} + +static int +otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node, *child_node; + struct otx2_nix_tm_shaper_profile *profile; + uint32_t profile_id; + + /* we don't support dynamic updates yet */ + if (dev->tm_flags & NIX_TM_COMMITTED) { + error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; + error->message = "hierarchy exists"; + return -EIO; + } + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Check for any existing children */ + TAILQ_FOREACH(child_node, &dev->node_list, node) { + if (child_node->parent == tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "children exist"; + return -EINVAL; + } + } + + /* Remove shaper profile reference */ + profile_id = tm_node->params.shaper_profile_id; + profile = nix_tm_shaper_profile_search(dev, profile_id); + profile->reference_count--; + + TAILQ_REMOVE(&dev->node_list, tm_node, node); + rte_free(tm_node); + return 0; +} + +static int +nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error, bool suspend) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct nix_txschq_config *req; + uint16_t flags; + int rc; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + if 
(!(dev->tm_flags & NIX_TM_COMMITTED)) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy doesn't exist"; + return -EINVAL; + } + + flags = tm_node->flags; + flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) : + (flags | NIX_TM_NODE_ENABLED); + + if (tm_node->flags == flags) + return 0; + + /* send mbox for state change */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + + req->lvl = tm_node->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node, suspend, + req->reg, req->regval); + rc = send_tm_reqval(mbox, req, error); + if (!rc) + tm_node->flags = flags; + return rc; +} + +static int +otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + return nix_tm_node_suspend_resume(eth_dev, node_id, error, true); +} + +static int +otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + return nix_tm_node_suspend_resume(eth_dev, node_id, error, false); +} + +static int +otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + uint32_t leaf_cnt = 0; + int rc; + + if (dev->tm_flags & NIX_TM_COMMITTED) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy exists"; + return -EINVAL; + } + + /* Check if we have all the leaf nodes */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->flags & NIX_TM_NODE_USER && + tm_node->id < dev->tm_leaf_cnt) + leaf_cnt++; + } + + if (leaf_cnt != dev->tm_leaf_cnt) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "incomplete hierarchy"; + return -EINVAL; + } + + /* + * Disable xmit will be enabled when + * new topology is available. + */ + rc = nix_xmit_disable(eth_dev); + if (rc) { + otx2_err("failed to disable TX, rc=%d", rc); + return -EIO; + } + + /* Delete default/ratelimit tree */ + if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) { + rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "failed to free default resources"; + return rc; + } + dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE | + NIX_TM_RATE_LIMIT_TREE); + } + + /* Free up user alloc'ed resources */ + rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, + NIX_TM_NODE_USER, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "failed to free user resources"; + return rc; + } + + rc = nix_tm_alloc_resources(eth_dev, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "alloc resources failed"; + /* TODO should we restore default config ? 
*/ + if (clear_on_fail) + nix_tm_free_resources(dev, 0, 0, false); + return rc; + } + + error->type = RTE_TM_ERROR_TYPE_NONE; + dev->tm_flags |= NIX_TM_COMMITTED; + return 0; +} + +static int +otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, + uint32_t node_id, + uint32_t profile_id, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_shaper_profile *profile = NULL; + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct nix_txschq_config *req; + uint8_t k; + int rc; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node"; + return -EINVAL; + } + + if (profile_id == tm_node->params.shaper_profile_id) + return 0; + + if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + profile = nix_tm_shaper_profile_search(dev, profile_id); + if (!profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "shaper profile ID not exist"; + return -EINVAL; + } + } + + if (profile && profile->params.packet_mode != tm_node->pkt_mode) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "shaper profile pkt mode mismatch"; + return -EINVAL; + } + + tm_node->params.shaper_profile_id = profile_id; + + /* Nothing to do if not yet committed */ + if (!(dev->tm_flags & NIX_TM_COMMITTED)) + return 0; + + tm_node->flags &= ~NIX_TM_NODE_ENABLED; + + /* Flush the specific node with SW_XOFF */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = tm_node->hw_lvl; + k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval); + req->num_regs = k; + + rc = send_tm_reqval(mbox, req, error); + if (rc) + return rc; + + shaper_default_red_algo(dev, tm_node, profile); + + /* Update the PIR/CIR and clear SW XOFF */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = tm_node->hw_lvl; + + k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval); + + k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]); + + req->num_regs = k; + rc = send_tm_reqval(mbox, req, error); + if (!rc) + tm_node->flags |= NIX_TM_NODE_ENABLED; + return rc; +} + +static int +otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, + uint32_t node_id, uint32_t new_parent_id, + uint32_t priority, uint32_t weight, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node, *sibling; + struct otx2_nix_tm_node *new_parent; + struct nix_txschq_config *req; + uint8_t k; + int rc; + + if (!(dev->tm_flags & NIX_TM_COMMITTED)) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy doesn't exist"; + return -EINVAL; + } + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Parent id valid only for non root nodes */ + if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) { + new_parent = nix_tm_node_search(dev, new_parent_id, true); + if (!new_parent) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "no such parent node"; + return -EINVAL; + } + + /* Current support is only for dynamic weight update */ + if (tm_node->parent != new_parent || + tm_node->priority != priority) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "only weight update supported"; + return -EINVAL; + } + } + + /* Skip if no 
change */ + if (tm_node->weight == weight) + return 0; + + tm_node->weight = weight; + + /* For leaf nodes, SQ CTX needs update */ + if (nix_tm_is_leaf(dev, tm_node->lvl)) { + /* Update SQ quantum data on the fly */ + rc = nix_sq_sched_data(dev, tm_node, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "sq sched data update failed"; + return rc; + } + } else { + /* XOFF Parent node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->parent->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XOFF this node and all other siblings */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + + k = 0; + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + k += prepare_tm_sw_xoff(sibling, true, &req->reg[k], + &req->regval[k]); + } + req->num_regs = k; + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* Update new weight for current node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + req->num_regs = prepare_tm_sched_reg(dev, tm_node, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XON this node and all other siblings */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + + k = 0; + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + k += prepare_tm_sw_xoff(sibling, false, &req->reg[k], + &req->regval[k]); + } + req->num_regs = k; + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XON Parent node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->parent->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + } + return 0; +} + +static int +otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, int clear, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + uint64_t reg, val; + int64_t *addr; + int rc = 0; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Stats support only for leaf node or TL1 root */ + if (nix_tm_is_leaf(dev, tm_node->lvl)) { + reg = (((uint64_t)tm_node->id) << 32); + + /* Packets */ + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->n_pkts = val - tm_node->last_pkts; + + /* Bytes */ + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->n_bytes = val - tm_node->last_bytes; + + if (clear) { + tm_node->last_pkts = stats->n_pkts; + tm_node->last_bytes = stats->n_bytes; + } + + *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES; + + } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "stats read error"; + + /* RED Drop packets */ + reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id); + rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1); + 
if (rc)
+			goto exit;
+		stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
+			val - tm_node->last_pkts;
+
+		/* RED Drop bytes */
+		reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
+		rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
+		if (rc)
+			goto exit;
+		stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
+			val - tm_node->last_bytes;
+
+		/* Clear stats */
+		if (clear) {
+			tm_node->last_pkts =
+				stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
+			tm_node->last_bytes =
+				stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
+		}
+
+		*stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+			RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+	} else {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "unsupported node";
+		rc = -EINVAL;
+	}
+
+exit:
+	return rc;
+}
+
+const struct rte_tm_ops otx2_tm_ops = {
+	.node_type_get = otx2_nix_tm_node_type_get,
+
+	.capabilities_get = otx2_nix_tm_capa_get,
+	.level_capabilities_get = otx2_nix_tm_level_capa_get,
+	.node_capabilities_get = otx2_nix_tm_node_capa_get,
+
+	.shaper_profile_add = otx2_nix_tm_shaper_profile_add,
+	.shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
+
+	.node_add = otx2_nix_tm_node_add,
+	.node_delete = otx2_nix_tm_node_delete,
+	.node_suspend = otx2_nix_tm_node_suspend,
+	.node_resume = otx2_nix_tm_node_resume,
+	.hierarchy_commit = otx2_nix_tm_hierarchy_commit,
+
+	.node_shaper_update = otx2_nix_tm_node_shaper_update,
+	.node_parent_update = otx2_nix_tm_node_parent_update,
+	.node_stats_read = otx2_nix_tm_node_stats_read,
+};
+
+static int
+nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint32_t def = eth_dev->data->nb_tx_queues;
+	struct rte_tm_node_params params;
+	uint32_t leaf_parent, i;
+	int rc = 0, leaf_level;
+
+	/* Default params */
+	memset(&params, 0, sizeof(params));
+	params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+
+	if (nix_tm_have_tl1_access(dev)) {
+		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
+		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL1,
+					     OTX2_TM_LVL_ROOT, false, &params);
+		if (rc)
+			goto exit;
+		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL2,
+					     OTX2_TM_LVL_SCH1, false, &params);
+		if (rc)
+			goto exit;
+
+		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL3,
+					     OTX2_TM_LVL_SCH2, false, &params);
+		if (rc)
+			goto exit;
+
+		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL4,
+					     OTX2_TM_LVL_SCH3, false, &params);
+		if (rc)
+			goto exit;
+
+		rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_SMQ,
+					     OTX2_TM_LVL_SCH4, false, &params);
+		if (rc)
+			goto exit;
+
+		leaf_parent = def + 4;
+		leaf_level = OTX2_TM_LVL_QUEUE;
+	} else {
+		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
+		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL2,
+					     OTX2_TM_LVL_ROOT, false, &params);
+		if (rc)
+			goto exit;
+
+		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL3,
+					     OTX2_TM_LVL_SCH1, false, &params);
+		if (rc)
+			goto exit;
+
+		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL4,
+					     OTX2_TM_LVL_SCH2, false, &params);
+		if (rc)
+			goto exit;
+
+		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_SMQ,
+					     OTX2_TM_LVL_SCH3, false, &params);
+		if (rc)
+			goto exit;
+
+		leaf_parent = def + 3;
+		leaf_level = OTX2_TM_LVL_SCH4;
+	}
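/*
 * Editor's illustration (not part of the patch): the default tree built
 * above is a single chain of scheduler nodes per level.  With TL1 access
 * (PF root) it is:
 *
 *     TL1 (root) - TL2 - TL3 - TL4 - SMQ - SQ0 .. SQ[n-1]
 *
 * and without TL1 access (e.g. VF, rooted at TL2):
 *
 *     TL2 (root) - TL3 - TL4 - SMQ - SQ0 .. SQ[n-1]
 *
 * where n = nb_tx_queues and all leaf SQs share the one SMQ created
 * above.  The rate-limited tree prepared later instead gives every SQ
 * its own SMQ/MDQ parent, so a per-queue PIR shaper can be programmed
 * at that level.
 */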
+ + /* Add leaf nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_CNT, + leaf_level, false, ¶ms); + if (rc) + break; + } + +exit: + return rc; +} + +void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + TAILQ_INIT(&dev->node_list); + TAILQ_INIT(&dev->shaper_profile_list); + dev->tm_rate_min = 1E9; /* 1Gbps */ +} + +int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t sq_cnt = eth_dev->data->nb_tx_queues; + int rc; + + /* Free up all resources already held */ + rc = nix_tm_free_resources(dev, 0, 0, false); + if (rc) { + otx2_err("Failed to freeup existing resources,rc=%d", rc); + return rc; + } + + /* Clear shaper profiles */ + nix_tm_clear_shaper_profiles(dev); + dev->tm_flags = NIX_TM_DEFAULT_TREE; + + /* Disable TL1 Static Priority when VF's are enabled + * as otherwise VF's TL2 reallocation will be needed + * runtime to support a specific topology of PF. + */ + if (pci_dev->max_vfs) + dev->tm_flags |= NIX_TM_TL1_NO_SP; + + rc = nix_tm_prepare_default_tree(eth_dev); + if (rc != 0) + return rc; + + rc = nix_tm_alloc_resources(eth_dev, false); + if (rc != 0) + return rc; + dev->tm_leaf_cnt = sq_cnt; + + return 0; +} + +static int +nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint32_t def = eth_dev->data->nb_tx_queues; + struct rte_tm_node_params params; + uint32_t leaf_parent, i, rc = 0; + + memset(¶ms, 0, sizeof(params)); + + if (nix_tm_have_tl1_access(dev)) { + dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1; + rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL1, + OTX2_TM_LVL_ROOT, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL2, + OTX2_TM_LVL_SCH1, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL3, + OTX2_TM_LVL_SCH2, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL4, + OTX2_TM_LVL_SCH3, false, ¶ms); + if (rc) + goto error; + leaf_parent = def + 3; + + /* Add per queue SMQ nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i, + leaf_parent, + 0, DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_SMQ, + OTX2_TM_LVL_SCH4, + false, ¶ms); + if (rc) + goto error; + } + + /* Add leaf nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, i, + leaf_parent + 1 + i, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_CNT, + OTX2_TM_LVL_QUEUE, + false, ¶ms); + if (rc) + goto error; + } + + return 0; + } + + dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2; + rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2, + OTX2_TM_LVL_ROOT, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3, + OTX2_TM_LVL_SCH1, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4, + OTX2_TM_LVL_SCH2, false, ¶ms); + if (rc) + goto error; + leaf_parent = def + 2; + + /* 
Add per queue SMQ nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i, + leaf_parent, + 0, DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_SMQ, + OTX2_TM_LVL_SCH3, + false, ¶ms); + if (rc) + goto error; + } + + /* Add leaf nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_CNT, + OTX2_TM_LVL_SCH4, + false, ¶ms); + if (rc) + break; + } +error: + return rc; +} + +static int +otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev, + struct otx2_nix_tm_node *tm_node, + uint64_t tx_rate) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_shaper_profile profile; + struct otx2_mbox *mbox = dev->mbox; + volatile uint64_t *reg, *regval; + struct nix_txschq_config *req; + uint16_t flags; + uint8_t k = 0; + int rc; + + flags = tm_node->flags; + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = NIX_TXSCH_LVL_MDQ; + reg = req->reg; + regval = req->regval; + + if (tx_rate == 0) { + k += prepare_tm_sw_xoff(tm_node, true, ®[k], ®val[k]); + flags &= ~NIX_TM_NODE_ENABLED; + goto exit; + } + + if (!(flags & NIX_TM_NODE_ENABLED)) { + k += prepare_tm_sw_xoff(tm_node, false, ®[k], ®val[k]); + flags |= NIX_TM_NODE_ENABLED; + } + + /* Use only PIR for rate limit */ + memset(&profile, 0, sizeof(profile)); + profile.params.peak.rate = tx_rate; + /* Minimum burst of ~4us Bytes of Tx */ + profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS, + (4ull * tx_rate) / (1E6 * 8)); + if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate) + dev->tm_rate_min = tx_rate; + + k += prepare_tm_shaper_reg(tm_node, &profile, ®[k], ®val[k]); +exit: + req->num_regs = k; + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + tm_node->flags = flags; + return 0; +} + +int +otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t tx_rate_mbps) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6; + struct otx2_nix_tm_node *tm_node; + int rc; + + /* Check for supported revisions */ + if (otx2_dev_is_95xx_Ax(dev) || + otx2_dev_is_96xx_Ax(dev)) + return -EINVAL; + + if (queue_idx >= eth_dev->data->nb_tx_queues) + return -EINVAL; + + if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) && + !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE)) + goto error; + + if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) && + eth_dev->data->nb_tx_queues > 1) { + /* For TM topology change ethdev needs to be stopped */ + if (eth_dev->data->dev_started) + return -EBUSY; + + /* + * Disable xmit will be enabled when + * new topology is available. 
+ */ + rc = nix_xmit_disable(eth_dev); + if (rc) { + otx2_err("failed to disable TX, rc=%d", rc); + return -EIO; + } + + rc = nix_tm_free_resources(dev, 0, 0, false); + if (rc < 0) { + otx2_tm_dbg("failed to free default resources, rc %d", + rc); + return -EIO; + } + + rc = nix_tm_prepare_rate_limited_tree(eth_dev); + if (rc < 0) { + otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc); + return rc; + } + + rc = nix_tm_alloc_resources(eth_dev, true); + if (rc != 0) { + otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc); + return rc; + } + + dev->tm_flags &= ~NIX_TM_DEFAULT_TREE; + dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE; + } + + tm_node = nix_tm_node_search(dev, queue_idx, false); + + /* check if we found a valid leaf node */ + if (!tm_node || + !nix_tm_is_leaf(dev, tm_node->lvl) || + !tm_node->parent || + tm_node->parent->hw_id == UINT32_MAX) + return -EIO; + + return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate); +error: + otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags); + return -EINVAL; +} + +int +otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + if (!arg) + return -EINVAL; + + /* Check for supported revisions */ + if (otx2_dev_is_95xx_Ax(dev) || + otx2_dev_is_96xx_Ax(dev)) + return -EINVAL; + + *(const void **)arg = &otx2_tm_ops; return 0; } @@ -1377,17 +3293,16 @@ otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq, tm_node = nix_tm_node_search(dev, sq, true); /* Check if we found a valid leaf node */ - if (!tm_node || tm_node->level_id != OTX2_TM_LVL_QUEUE || + if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) || !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) { return -EIO; } /* Get SMQ Id of leaf node's parent */ *smq = tm_node->parent->hw_id; - *rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX) - / MAX_SCHED_WEIGHT; + *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight); - rc = nix_smq_xoff(dev, *smq, false); + rc = nix_smq_xoff(dev, tm_node->parent, false); if (rc) return rc; tm_node->flags |= NIX_TM_NODE_ENABLED;
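Illustrative usage (editor's sketch, not part of the patch): the ops table and the
queue rate-limit hook above are reached through the generic rte_tm/ethdev API. The
port id, profile id, rates, burst sizes and the 50 Mbit/s figure below are arbitrary
assumptions, and a real application would also build a full node hierarchy before
committing it.

	#include <string.h>

	#include <rte_ethdev.h>
	#include <rte_tm.h>

	/* Minimal sketch of driving the otx2 TM ops via the public API. */
	static int
	example_otx2_tm_usage(uint16_t port_id)
	{
		struct rte_tm_shaper_params sp;
		struct rte_tm_error err;
		int rc;

		/* Dual-rate profile: rte_tm rates are bytes/sec; the PMD
		 * converts them to bits/sec internally (the "* 8" above).
		 */
		memset(&sp, 0, sizeof(sp));
		sp.committed.rate = (100 * 1000 * 1000) / 8;	/* 100 Mbit/s */
		sp.committed.size = 4096;			/* bytes */
		sp.peak.rate = (200 * 1000 * 1000) / 8;		/* 200 Mbit/s */
		sp.peak.size = 4096;
		sp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
		sp.packet_mode = 0;				/* byte mode */

		rc = rte_tm_shaper_profile_add(port_id, 1 /* profile id */,
					       &sp, &err);
		if (rc)
			return rc;

		/* A custom hierarchy would now be added root-down with
		 * rte_tm_node_add() (leaf node ids are the Tx queue indexes,
		 * non-leaf ids anything above that range) and applied with
		 * rte_tm_hierarchy_commit(port_id, 1, &err) before the port
		 * is started.
		 *
		 * Independently of rte_tm, the per-queue rate limit path
		 * added above is reached via ethdev; tx_rate is in Mbit/s,
		 * 0 removes the limit.
		 */
		return rte_eth_set_queue_rate_limit(port_id, 0 /* queue */, 50);
	}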