diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index bea23be805..1d7dd68f70 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -5,6 +5,14 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
+static inline uint64_t
+nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
+{
+	return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
+	       (shaper->div_exp << 13) | (shaper->exponent << 9) |
+	       (shaper->mantissa << 1);
+}
+
 uint16_t
 nix_tm_lvl2nix_tl1_root(uint32_t lvl)
 {
@@ -50,6 +58,32 @@ nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
 		return nix_tm_lvl2nix_tl2_root(lvl);
 }
 
+static uint8_t
+nix_tm_relchan_get(struct nix *nix)
+{
+	return nix->tx_chan_base & 0xff;
+}
+
+static int
+nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
+			enum roc_nix_tm_tree tree)
+{
+	struct nix_tm_node *child_node;
+	struct nix_tm_node_list *list;
+
+	list = nix_tm_node_list(nix, tree);
+
+	TAILQ_FOREACH(child_node, list, node) {
+		if (!child_node->parent)
+			continue;
+		if (!(child_node->parent->id == node_id))
+			continue;
+		if (child_node->priority == child_node->parent->rr_prio)
+			continue;
+		return child_node->hw_id - child_node->priority;
+	}
+	return 0;
+}
+
 struct nix_tm_shaper_profile *
 nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
@@ -77,6 +111,191 @@ nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
 	return NULL;
 }
 
+uint64_t
+nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
+			uint64_t *mantissa_p, uint64_t *div_exp_p)
+{
+	uint64_t div_exp, exponent, mantissa;
+
+	/* Boundary checks */
+	if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
+		return 0;
+
+	if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
+		/* Calculate rate div_exp and mantissa using
+		 * the following formula:
+		 *
+		 * value = (2E6 * (256 + mantissa)) /
+		 *	   ((1 << div_exp) * 256)
+		 */
+		div_exp = 0;
+		exponent = 0;
+		mantissa = NIX_TM_MAX_RATE_MANTISSA;
+
+		while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
+			div_exp += 1;
+
+		while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
+				((1 << div_exp) * 256)))
+			mantissa -= 1;
+	} else {
+		/* Calculate rate exponent and mantissa using
+		 * the following formula:
+		 *
+		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
+		 */
+		div_exp = 0;
+		exponent = NIX_TM_MAX_RATE_EXPONENT;
+		mantissa = NIX_TM_MAX_RATE_MANTISSA;
+
+		while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
+			exponent -= 1;
+
+		while (value < ((NIX_TM_SHAPER_RATE_CONST *
+				 ((256 + mantissa) << exponent)) /
+				256))
+			mantissa -= 1;
+	}
+
+	if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
+	    exponent > NIX_TM_MAX_RATE_EXPONENT ||
+	    mantissa > NIX_TM_MAX_RATE_MANTISSA)
+		return 0;
+
+	if (div_exp_p)
+		*div_exp_p = div_exp;
+	if (exponent_p)
+		*exponent_p = exponent;
+	if (mantissa_p)
+		*mantissa_p = mantissa;
+
+	/* Calculate real rate value */
+	return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
+}
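/*
 * Worked example (illustrative, using the 2E6 rate constant from the
 * comments above; not part of the patch): a request of 1 Gbps takes
 * the exponent/mantissa branch, and the loops settle on exponent = 8
 * and mantissa = 244, so the returned "real" rate is
 *
 *	(2E6 * ((256 + 244) << 8)) / 256 = 1000000000
 *
 * i.e. this particular request is representable exactly.
 */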
+uint64_t
+nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
+			 uint64_t *mantissa_p)
+{
+	uint64_t exponent, mantissa;
+
+	if (value < NIX_TM_MIN_SHAPER_BURST || value > NIX_TM_MAX_SHAPER_BURST)
+		return 0;
+
+	/* Calculate burst exponent and mantissa using
+	 * the following formula:
+	 *
+	 * value = ((256 + mantissa) << (exponent + 1)) / 256
+	 */
+	exponent = NIX_TM_MAX_BURST_EXPONENT;
+	mantissa = NIX_TM_MAX_BURST_MANTISSA;
+
+	while (value < (1ull << (exponent + 1)))
+		exponent -= 1;
+
+	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
+		mantissa -= 1;
+
+	if (exponent > NIX_TM_MAX_BURST_EXPONENT ||
+	    mantissa > NIX_TM_MAX_BURST_MANTISSA)
+		return 0;
+
+	if (exponent_p)
+		*exponent_p = exponent;
+	if (mantissa_p)
+		*mantissa_p = mantissa;
+
+	return NIX_TM_SHAPER_BURST(exponent, mantissa);
+}
+
+static void
+nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
+		       struct nix_tm_shaper_data *cir,
+		       struct nix_tm_shaper_data *pir)
+{
+	if (!profile)
+		return;
+
+	/* Calculate CIR exponent and mantissa */
+	if (profile->commit.rate)
+		cir->rate = nix_tm_shaper_rate_conv(
+			profile->commit.rate, &cir->exponent, &cir->mantissa,
+			&cir->div_exp);
+
+	/* Calculate PIR exponent and mantissa */
+	if (profile->peak.rate)
+		pir->rate = nix_tm_shaper_rate_conv(
+			profile->peak.rate, &pir->exponent, &pir->mantissa,
+			&pir->div_exp);
+
+	/* Calculate CIR burst exponent and mantissa */
+	if (profile->commit.size)
+		cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
+						      &cir->burst_exponent,
+						      &cir->burst_mantissa);
+
+	/* Calculate PIR burst exponent and mantissa */
+	if (profile->peak.size)
+		pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
+						      &pir->burst_exponent,
+						      &pir->burst_mantissa);
+}
+
+uint32_t
+nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
+		uint32_t *rr_prio, uint32_t *max_prio)
+{
+	uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
+	struct nix_tm_node_list *list;
+	struct nix_tm_node *node;
+	uint32_t rr_num = 0, i;
+	uint32_t children = 0;
+	uint32_t priority;
+
+	memset(node_cnt, 0, sizeof(node_cnt));
+	*rr_prio = 0xF;
+	*max_prio = UINT32_MAX;
+
+	list = nix_tm_node_list(nix, tree);
+	TAILQ_FOREACH(node, list, node) {
+		if (!node->parent)
+			continue;
+
+		if (!(node->parent->id == parent_id))
+			continue;
+
+		priority = node->priority;
+		node_cnt[priority]++;
+		children++;
+	}
+
+	/* An RR group with a single child is considered SP */
+	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
+		if (!node_cnt[i])
+			break;
+
+		if (node_cnt[i] > rr_num) {
+			*rr_prio = i;
+			rr_num = node_cnt[i];
+		}
+	}
+
+	if (rr_num == 1) {
+		*rr_prio = 0xF;
+		rr_num = 0;
+	}
+
+	/* Max prio is returned only for a non-zero prio,
+	 * or when the parent has a single child.
+	 */
+	if (i > 1 || (children == 1))
+		*max_prio = i - 1;
+	return rr_num;
+}
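/*
 * Worked example (illustrative): for a parent with children at
 * priorities {0, 1, 1, 1, 2}, node_cnt becomes {1, 3, 1}, so the
 * function reports *rr_prio = 1 and returns rr_num = 3 (the three
 * priority-1 children form the DWRR group), with *max_prio = 2.
 * With children at {0, 1, 2} every priority has a single child, so
 * rr_num ends up 0, *rr_prio stays 0xF and all children are SP.
 */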
+
 static uint16_t
 nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
 {
@@ -141,6 +360,364 @@ nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
 	return 0;
 }
 
+bool
+nix_tm_child_res_valid(struct nix_tm_node_list *list,
+		       struct nix_tm_node *parent)
+{
+	struct nix_tm_node *child;
+
+	TAILQ_FOREACH(child, list, node) {
+		if (child->parent != parent)
+			continue;
+		if (!(child->flags & NIX_TM_NODE_HWRES))
+			return false;
+	}
+	return true;
+}
+
+uint8_t
+nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,
+			volatile uint64_t *regval)
+{
+	uint8_t k = 0;
+
+	/*
+	 * Default config for TL1.
+	 * For VF this is always ignored.
+	 */
+	plt_tm_dbg("Default config for main root %s(%u)",
+		   nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);
+
+	/* Set DWRR quantum */
+	reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
+	regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
+	k++;
+
+	reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
+	regval[k] = (NIX_TM_TL1_DFLT_RR_PRIO << 1);
+	k++;
+
+	reg[k] = NIX_AF_TL1X_CIR(schq);
+	regval[k] = 0;
+	k++;
+
+	return k;
+}
+
+uint8_t
+nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
+			 volatile uint64_t *reg, volatile uint64_t *regval,
+			 volatile uint64_t *regval_mask)
+{
+	uint8_t k = 0, hw_lvl, parent_lvl;
+	uint64_t parent = 0, child = 0;
+	enum roc_nix_tm_tree tree;
+	uint32_t rr_prio, schq;
+	uint16_t link, relchan;
+
+	tree = node->tree;
+	schq = node->hw_id;
+	hw_lvl = node->hw_lvl;
+	parent_lvl = hw_lvl + 1;
+	rr_prio = node->rr_prio;
+
+	/* Root node will not have a parent node */
+	if (hw_lvl == nix->tm_root_lvl)
+		parent = node->parent_hw_id;
+	else
+		parent = node->parent->hw_id;
+
+	link = nix->tx_link;
+	relchan = nix_tm_relchan_get(nix);
+
+	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
+		child = nix_tm_find_prio_anchor(nix, node->id, tree);
+
+	/* Override default rr_prio when TL1
+	 * Static Priority is disabled
+	 */
+	if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
+		rr_prio = NIX_TM_TL1_DFLT_RR_PRIO;
+		child = 0;
+	}
+
+	plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
+		   " prio_anchor %" PRIu64 " rr_prio %u (%p)",
+		   nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
+		   parent, node->lvl, node->id, child, rr_prio, node);
+
+	/* Prepare Topology and Link config */
+	switch (hw_lvl) {
+	case NIX_TXSCH_LVL_SMQ:
+
+		/* Set xoff, which will be cleared later */
+		reg[k] = NIX_AF_SMQX_CFG(schq);
+		regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
+			     ((nix->mtu & 0xFFFF) << 8));
+		regval_mask[k] =
+			~(BIT_ULL(50) | GENMASK_ULL(6, 0) | GENMASK_ULL(23, 8));
+		k++;
+
+		/* Parent and schedule conf */
+		reg[k] = NIX_AF_MDQX_PARENT(schq);
+		regval[k] = parent << 16;
+		k++;
+
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		/* Parent and schedule conf */
+		reg[k] = NIX_AF_TL4X_PARENT(schq);
+		regval[k] = parent << 16;
+		k++;
+
+		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
+		regval[k] = (child << 32) | (rr_prio << 1);
+		k++;
+
+		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
+		if (nix->sdp_link) {
+			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+			regval[k] = BIT_ULL(12);
+			k++;
+		}
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		/* Parent and schedule conf */
+		reg[k] = NIX_AF_TL3X_PARENT(schq);
+		regval[k] = parent << 16;
+		k++;
+
+		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
+		regval[k] = (child << 32) | (rr_prio << 1);
+		k++;
+
+		/* Link configuration */
+		if (!nix->sdp_link &&
+		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
+			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
+			regval[k] = BIT_ULL(12) | relchan;
+			k++;
+		}
+
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		/* Parent and schedule conf */
+		reg[k] = NIX_AF_TL2X_PARENT(schq);
+		regval[k] = parent << 16;
+		k++;
+
+		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
+		regval[k] = (child << 32) | (rr_prio << 1);
+		k++;
+
+		/* Link configuration */
+		if (!nix->sdp_link &&
+		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
+			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
+			regval[k] = BIT_ULL(12) | relchan;
+			k++;
+		}
+
+		break;
+	case NIX_TXSCH_LVL_TL1:
+		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
+		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
+		k++;
+
+		break;
+	}
+
+	return k;
+}
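/*
 * Usage sketch (illustrative; array sizes and the mailbox step are
 * assumptions, not part of the patch): the *_reg_prep() helpers only
 * build register/value pairs and return how many entries they filled.
 * A caller is expected to do something like:
 *
 *	volatile uint64_t reg[8], regval[8], regval_mask[8];
 *	uint8_t k;
 *
 *	k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
 *	// then issue the k (reg, regval) writes through the AF mailbox
 */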
+
+uint8_t
+nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
+		      volatile uint64_t *reg, volatile uint64_t *regval)
+{
+	uint64_t strict_prio = node->priority;
+	uint32_t hw_lvl = node->hw_lvl;
+	uint32_t schq = node->hw_id;
+	uint64_t rr_quantum;
+	uint8_t k = 0;
+
+	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
+
+	/* For children of the root, strict prio is the default if either
+	 * the device root is TL2 or TL1 Static Priority is disabled.
+	 */
+	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
+	    (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
+		strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;
+
+	plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
+		   "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
+		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
+		   strict_prio, rr_quantum, node);
+
+	switch (hw_lvl) {
+	case NIX_TXSCH_LVL_SMQ:
+		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
+		regval[k] = (strict_prio << 24) | rr_quantum;
+		k++;
+
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
+		regval[k] = (strict_prio << 24) | rr_quantum;
+		k++;
+
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
+		regval[k] = (strict_prio << 24) | rr_quantum;
+		k++;
+
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
+		regval[k] = (strict_prio << 24) | rr_quantum;
+		k++;
+
+		break;
+	case NIX_TXSCH_LVL_TL1:
+		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
+		regval[k] = rr_quantum;
+		k++;
+
+		break;
+	}
+
+	return k;
+}
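/*
 * Encoding example (illustrative): per the code above, the
 * NIX_AF_*_SCHEDULE value carries the SP priority at bit 24 and the
 * DWRR quantum in the low bits, so a priority-2 node with
 * rr_quantum 0x8000 programs (2ULL << 24) | 0x8000 = 0x2008000.
 * TL1 is the exception and programs only the quantum, since its
 * priority is fixed by nix_tm_tl1_default_prep().
 */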
+
+uint8_t
+nix_tm_shaper_reg_prep(struct nix_tm_node *node,
+		       struct nix_tm_shaper_profile *profile,
+		       volatile uint64_t *reg, volatile uint64_t *regval)
+{
+	struct nix_tm_shaper_data cir, pir;
+	uint32_t schq = node->hw_id;
+	uint64_t adjust = 0;
+	uint8_t k = 0;
+
+	memset(&cir, 0, sizeof(cir));
+	memset(&pir, 0, sizeof(pir));
+	nix_tm_shaper_conf_get(profile, &cir, &pir);
+
+	if (node->pkt_mode)
+		adjust = 1;
+	else if (profile)
+		adjust = profile->pkt_len_adj;
+
+	plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
+		   "pir %" PRIu64 "(%" PRIu64 "B), "
+		   "cir %" PRIu64 "(%" PRIu64 "B), "
+		   "adjust 0x%" PRIx64 "(pktmode %u) (%p)",
+		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
+		   pir.rate, pir.burst, cir.rate, cir.burst, adjust,
+		   node->pkt_mode, node);
+
+	switch (node->hw_lvl) {
+	case NIX_TXSCH_LVL_SMQ:
+		/* Configure PIR, CIR */
+		reg[k] = NIX_AF_MDQX_PIR(schq);
+		regval[k] = (pir.rate && pir.burst) ?
+			    (nix_tm_shaper2regval(&pir) | 1) : 0;
+		k++;
+
+		reg[k] = NIX_AF_MDQX_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+			    (nix_tm_shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure RED ALG */
+		reg[k] = NIX_AF_MDQX_SHAPE(schq);
+		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+			     (uint64_t)node->pkt_mode << 24);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		/* Configure PIR, CIR */
+		reg[k] = NIX_AF_TL4X_PIR(schq);
+		regval[k] = (pir.rate && pir.burst) ?
+			    (nix_tm_shaper2regval(&pir) | 1) : 0;
+		k++;
+
+		reg[k] = NIX_AF_TL4X_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+			    (nix_tm_shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure RED algo */
+		reg[k] = NIX_AF_TL4X_SHAPE(schq);
+		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+			     (uint64_t)node->pkt_mode << 24);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		/* Configure PIR, CIR */
+		reg[k] = NIX_AF_TL3X_PIR(schq);
+		regval[k] = (pir.rate && pir.burst) ?
+			    (nix_tm_shaper2regval(&pir) | 1) : 0;
+		k++;
+
+		reg[k] = NIX_AF_TL3X_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+			    (nix_tm_shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure RED algo */
+		reg[k] = NIX_AF_TL3X_SHAPE(schq);
+		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+			     (uint64_t)node->pkt_mode << 24);
+		k++;
+
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		/* Configure PIR, CIR */
+		reg[k] = NIX_AF_TL2X_PIR(schq);
+		regval[k] = (pir.rate && pir.burst) ?
+			    (nix_tm_shaper2regval(&pir) | 1) : 0;
+		k++;
+
+		reg[k] = NIX_AF_TL2X_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+			    (nix_tm_shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure RED algo */
+		reg[k] = NIX_AF_TL2X_SHAPE(schq);
+		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+			     (uint64_t)node->pkt_mode << 24);
+		k++;
+
+		break;
+	case NIX_TXSCH_LVL_TL1:
+		/* Configure CIR */
+		reg[k] = NIX_AF_TL1X_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+			    (nix_tm_shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure length disable and adjust */
+		reg[k] = NIX_AF_TL1X_SHAPE(schq);
+		regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
+		k++;
+		break;
+	}
+
+	return k;
+}
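/*
 * Note (illustrative): nix_tm_shaper2regval() packs the rate and burst
 * exponent/mantissa fields of a PIR/CIR register starting at bit 1, so
 * bit 0 is left clear and the "| 1" above is what actually enables the
 * shaper. A profile whose rate or burst converts to zero writes 0 and
 * leaves that shaper disabled.
 */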
+
 uint8_t
 nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
 		    volatile uint64_t *reg, volatile uint64_t *regval)
@@ -183,6 +760,23 @@ nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
 	return k;
 }
 
+/* Search for the minimum shaper rate in the topology */
+uint64_t
+nix_tm_shaper_profile_rate_min(struct nix *nix)
+{
+	struct nix_tm_shaper_profile *profile;
+	uint64_t rate_min = 1E9; /* 1 Gbps */
+
+	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
+		if (profile->peak.rate && profile->peak.rate < rate_min)
+			rate_min = profile->peak.rate;
+
+		if (profile->commit.rate && profile->commit.rate < rate_min)
+			rate_min = profile->commit.rate;
+	}
+	return rate_min;
+}
+
 uint16_t
 nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
 {
@@ -208,6 +802,90 @@ nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
 	return count;
 }
 
+uint16_t
+nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
+			 enum roc_nix_tm_tree tree)
+{
+	struct nix_tm_node_list *list;
+	uint8_t contig_cnt, hw_lvl;
+	struct nix_tm_node *parent;
+	uint16_t cnt = 0, avail;
+
+	list = nix_tm_node_list(nix, tree);
+	/* Walk through parents from TL1..TL4 */
+	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
+		TAILQ_FOREACH(parent, list, node) {
+			if (hw_lvl != parent->hw_lvl)
+				continue;
+
+			/* Skip accounting for children whose
+			 * parent does not request a reallocation.
+			 */
+			if (!parent->child_realloc)
+				continue;
+
+			/* Count children needed */
+			schq[hw_lvl - 1] += parent->rr_num;
+			if (parent->max_prio != UINT32_MAX) {
+				contig_cnt = parent->max_prio + 1;
+				schq_contig[hw_lvl - 1] += contig_cnt;
+				/* When we have SP + DWRR at a parent,
+				 * we will always have a spare schq at rr prio
+				 * location in contiguous queues. Hence reduce
+				 * discontiguous count by 1.
+				 */
+				if (parent->max_prio > 0 && parent->rr_num)
+					schq[hw_lvl - 1] -= 1;
+			}
+		}
+	}
+
+	schq[nix->tm_root_lvl] = 1;
+	if (!nix_tm_have_tl1_access(nix))
+		schq[NIX_TXSCH_LVL_TL1] = 1;
+
+	/* Now check for existing resources */
+	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
+		avail = nix_tm_resource_avail(nix, hw_lvl, false);
+		if (schq[hw_lvl] <= avail)
+			schq[hw_lvl] = 0;
+		else
+			schq[hw_lvl] -= avail;
+
+		/* For contiguous queues, realloc everything */
+		avail = nix_tm_resource_avail(nix, hw_lvl, true);
+		if (schq_contig[hw_lvl] <= avail)
+			schq_contig[hw_lvl] = 0;
+
+		cnt += schq[hw_lvl];
+		cnt += schq_contig[hw_lvl];
+
+		plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
+			   nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
+			   schq_contig[hw_lvl]);
+	}
+
+	return cnt;
+}
+
+uint16_t
+roc_nix_tm_leaf_cnt(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct nix_tm_node_list *list;
+	struct nix_tm_node *node;
+	uint16_t leaf_cnt = 0;
+
+	/* Count leaves only in the user list */
+	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
+	TAILQ_FOREACH(node, list, node) {
+		if (node->id < nix->nb_tx_queues)
+			leaf_cnt++;
+	}
+
+	return leaf_cnt;
+}
+
 int
 roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
 {
@@ -251,6 +929,34 @@ roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
 	return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
 }
 
+struct roc_nix_tm_shaper_profile *
+roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct nix_tm_shaper_profile *profile;
+
+	profile = nix_tm_shaper_profile_search(nix, profile_id);
+	return (struct roc_nix_tm_shaper_profile *)profile;
+}
+
+struct roc_nix_tm_shaper_profile *
+roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
+			       struct roc_nix_tm_shaper_profile *__prev)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct nix_tm_shaper_profile_list *list;
+	struct nix_tm_shaper_profile *prev;
+
+	prev = (struct nix_tm_shaper_profile *)__prev;
+	list = &nix->shaper_profile_list;
+
+	/* HEAD of the list */
+	if (!prev)
+		return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);
+
+	return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
+}
+
 struct nix_tm_node *
 nix_tm_node_alloc(void)
 {
@@ -272,3 +978,25 @@ nix_tm_node_free(struct nix_tm_node *node)
 
 	(node->free_fn)(node);
 }
+
+struct nix_tm_shaper_profile *
+nix_tm_shaper_profile_alloc(void)
+{
+	struct nix_tm_shaper_profile *profile;
+
+	profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
+	if (!profile)
+		return NULL;
+
+	profile->free_fn = plt_free;
+	return profile;
+}
+
+void
+nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
+{
+	if (!profile || !profile->free_fn)
+		return;
+
+	(profile->free_fn)(profile);
+}
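/*
 * Usage sketch (illustrative, not part of the patch): allocation and
 * teardown are expected to go through the paired helpers so that the
 * free_fn installed at alloc time (plt_free here) is honoured:
 *
 *	struct nix_tm_shaper_profile *profile;
 *
 *	profile = nix_tm_shaper_profile_alloc();
 *	if (profile) {
 *		// ... populate and use the profile ...
 *		nix_tm_shaper_profile_free(profile);
 *	}
 */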