/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;

	plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
		   enable ? "enable" : "disable");

	lf = idev_npa_obj_get();
	if (!lf)
		return NPA_ERR_DEVICE_NOT_BOUNDED;
	/* Set/clear sqb aura fc_ena */
	aura_handle = sq->aura_handle;
	req = mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;
	/* Not needed for aura writes, but the AF driver expects it;
	 * AF will translate it to the associated pool context.
	 */
	req->aura.pool_addr = req->aura_id;

	req->aura.fc_ena = enable;
	req->aura_mask.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0()) {
		req->aura.fc_stype = 0x0; /* STF */
		req->aura_mask.fc_stype = 0x0; /* STF */
	} else {
		req->aura.fc_stype = 0x3; /* STSTP */
		req->aura_mask.fc_stype = 0x3; /* STSTP */
	}
	rc = mbox_process(mbox);
	/* Read back npa aura ctx */
	req = mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	/* Init when enabled as there might be no triggers */
	if (enable)
		*(volatile uint64_t *)sq->fc = rsp->aura.count;
	else
		*(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
	/* Sync write barrier */
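	/* Usage note (illustrative, grounded in the callers below): this
	 * flow-control toggle is paired with a flush; hierarchy disable
	 * calls roc_nix_tm_sq_aura_fc(sq, false) before draining each SQ,
	 * and hierarchy enable calls roc_nix_tm_sq_aura_fc(sq, true) per SQ.
	 */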
roc_nix_tm_free_resources(struct roc_nix *roc_nix, bool hw_only)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA)
		return -EBUSY;

	return nix_tm_free_resources(roc_nix, BIT(ROC_NIX_TM_USER), hw_only);
nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
	uint64_t min_rate = profile->commit.rate;

	if (!profile->pkt_mode)
		return 0;

	profile->pkt_mode_adj = 1;

	if (profile->commit.rate &&
	    (profile->commit.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
	     profile->commit.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
		return NIX_ERR_TM_INVALID_COMMIT_RATE;
	if (profile->peak.rate &&
	    (profile->peak.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
	     profile->peak.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
		return NIX_ERR_TM_INVALID_PEAK_RATE;

	if (profile->peak.rate && min_rate > profile->peak.rate)
		min_rate = profile->peak.rate;
	/* Each packet accumulates a single count, whereas HW accounts
	 * each unit in bytes, so convert the PPS rates to their
	 * bits-per-second equivalents (8 bits per packet count).
	 */
	profile->commit.rate = profile->commit.rate * 8;
	profile->peak.rate = profile->peak.rate * 8;
	min_rate = min_rate * 8;
	if (min_rate && (min_rate < NIX_TM_MIN_SHAPER_RATE)) {
		int adjust = NIX_TM_MIN_SHAPER_RATE / min_rate;

		if (adjust > NIX_TM_LENGTH_ADJUST_MAX)
			return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

		profile->pkt_mode_adj += adjust;
		profile->commit.rate += (adjust * profile->commit.rate);
		profile->peak.rate += (adjust * profile->peak.rate);
	}
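	/* Worked example with hypothetical constants: for a commit rate of
	 * 100 PPS, the converted rate is 100 * 8 = 800. If
	 * NIX_TM_MIN_SHAPER_RATE were 2400, then adjust = 2400 / 800 = 3,
	 * pkt_mode_adj becomes 1 + 3 = 4, and each rate scales by
	 * (1 + adjust) to 3200. HW then accounts 4 units per packet at a
	 * 4x rate, leaving the effective packets-per-second unchanged.
	 */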
nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
			  struct nix_tm_shaper_profile *profile, int skip_ins)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint64_t commit_rate, commit_sz;
	uint64_t min_burst, max_burst;
	uint64_t peak_rate, peak_sz;
	uint32_t id = profile->id;

	rc = nix_tm_adjust_shaper_pps_rate(profile);
	if (rc)
		return rc;
	commit_rate = profile->commit.rate;
	commit_sz = profile->commit.size;
	peak_rate = profile->peak.rate;
	peak_sz = profile->peak.size;

	min_burst = NIX_TM_MIN_SHAPER_BURST;
	max_burst = roc_nix_tm_max_shaper_burst_get();
	if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
		return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;

	if (profile->pkt_len_adj < NIX_TM_LENGTH_ADJUST_MIN ||
	    profile->pkt_len_adj > NIX_TM_LENGTH_ADJUST_MAX)
		return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

	/* We cannot support both pkt length adjust and pkt mode */
	if (profile->pkt_mode && profile->pkt_len_adj)
		return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
	/* Commit rate and burst size can be enabled/disabled */
	if (commit_rate || commit_sz) {
		if (commit_sz < min_burst || commit_sz > max_burst)
			return NIX_ERR_TM_INVALID_COMMIT_SZ;
		else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL,
						  NULL))
			return NIX_ERR_TM_INVALID_COMMIT_RATE;
	}

	/* Peak rate and burst size can be enabled/disabled */
	if (peak_sz || peak_rate) {
		if (peak_sz < min_burst || peak_sz > max_burst)
			return NIX_ERR_TM_INVALID_PEAK_SZ;
		else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL))
			return NIX_ERR_TM_INVALID_PEAK_RATE;
	}
	TAILQ_INSERT_TAIL(&nix->shaper_profile_list, profile, shaper);

	plt_tm_dbg("Added TM shaper profile %u, pir %" PRIu64
		   ", pbs %" PRIu64 ", cir %" PRIu64 ", cbs %" PRIu64
		   ", adj %u, pkt_mode %u",
		   id, profile->peak.rate, profile->peak.size,
		   profile->commit.rate, profile->commit.size,
		   profile->pkt_len_adj, profile->pkt_mode);
	/* Always use PIR for single rate shaping */
	if (!peak_rate && commit_rate) {
		profile->peak.rate = profile->commit.rate;
		profile->peak.size = profile->commit.size;
		profile->commit.rate = 0;
		profile->commit.size = 0;
	}
	/* Update min rate */
	nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
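	/* Illustrative effect of the PIR swap above: a single-rate profile
	 * passed in with commit.rate = 1000000 and peak.rate = 0 leaves
	 * this function with peak.rate = 1000000 and commit.rate = 0, so
	 * the rate is programmed through the PIR rather than CIR registers.
	 */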
roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
			      struct roc_nix_tm_shaper_profile *roc_profile)
	struct nix_tm_shaper_profile *profile;

	profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

	profile->ref_cnt = 0;
	profile->id = roc_profile->id;
	profile->commit.rate = roc_profile->commit_rate;
	profile->peak.rate = roc_profile->peak_rate;
	profile->commit.size = roc_profile->commit_sz;
	profile->peak.size = roc_profile->peak_sz;
	profile->pkt_len_adj = roc_profile->pkt_len_adj;
	profile->pkt_mode = roc_profile->pkt_mode;
	profile->free_fn = roc_profile->free_fn;

	return nix_tm_shaper_profile_add(roc_nix, profile, 0);
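/* Usage sketch for roc_nix_tm_shaper_profile_add() above; the ID and
 * rates are hypothetical and rate units follow pkt_mode (byte- or
 * packet-based shaping):
 *
 *	struct roc_nix_tm_shaper_profile sp;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.id = 1;
 *	sp.peak_rate = 100000000;
 *	sp.peak_sz = 8192;
 *	sp.pkt_mode = 0;
 *	rc = roc_nix_tm_shaper_profile_add(roc_nix, &sp);
 */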
roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
				 struct roc_nix_tm_shaper_profile *roc_profile)
	struct nix_tm_shaper_profile *profile;

	profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

	profile->commit.rate = roc_profile->commit_rate;
	profile->peak.rate = roc_profile->peak_rate;
	profile->commit.size = roc_profile->commit_sz;
	profile->peak.size = roc_profile->peak_sz;

	return nix_tm_shaper_profile_add(roc_nix, profile, 1);
roc_nix_tm_shaper_profile_delete(struct roc_nix *roc_nix, uint32_t id)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, id);
	if (!profile)
		return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

	if (profile->ref_cnt)
		return NIX_ERR_TM_SHAPER_PROFILE_IN_USE;

	plt_tm_dbg("Removing TM shaper profile %u", id);
	TAILQ_REMOVE(&nix->shaper_profile_list, profile, shaper);
	nix_tm_shaper_profile_free(profile);

	/* Update min rate */
	nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
	struct nix_tm_node *node;

	node = (struct nix_tm_node *)&roc_node->reserved;
	node->id = roc_node->id;
	node->priority = roc_node->priority;
	node->weight = roc_node->weight;
	node->lvl = roc_node->lvl;
	node->parent_id = roc_node->parent_id;
	node->shaper_profile_id = roc_node->shaper_profile_id;
	node->pkt_mode = roc_node->pkt_mode;
	node->pkt_mode_set = roc_node->pkt_mode_set;
	node->free_fn = roc_node->free_fn;
	node->tree = ROC_NIX_TM_USER;

	return nix_tm_node_add(roc_nix, node);
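/* Usage sketch for roc_nix_tm_node_add() above; the IDs are hypothetical,
 * and ROC_TM_LVL_ROOT / ROC_NIX_TM_NODE_ID_INVALID are assumed helpers for
 * the root level and the no-parent sentinel:
 *
 *	struct roc_nix_tm_node tn;
 *
 *	memset(&tn, 0, sizeof(tn));
 *	tn.id = 100;
 *	tn.parent_id = ROC_NIX_TM_NODE_ID_INVALID;
 *	tn.lvl = ROC_TM_LVL_ROOT;
 *	tn.weight = 1;
 *	tn.shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 *	rc = roc_nix_tm_node_add(roc_nix, &tn);
 */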
roc_nix_tm_node_pkt_mode_update(struct roc_nix *roc_nix, uint32_t node_id,
				bool pkt_mode)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node, *child;
	struct nix_tm_node_list *list;
	int num_children = 0;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->pkt_mode == pkt_mode) {
		node->pkt_mode_set = true;
		return 0;
	}

	/* If the node already has children, the pkt mode cannot be
	 * updated, since the children's quantum values are already
	 * derived from the current mode.
	 */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(child, list, node) {
		if (child->parent == node)
			num_children++;
	}

	/* Cannot update mode if it has children or tree is enabled */
	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) && num_children)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	if (node->pkt_mode_set && num_children)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	node->pkt_mode = pkt_mode;
	node->pkt_mode_set = true;

	return 0;
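/* Ordering note for roc_nix_tm_node_pkt_mode_update() above: set the pkt
 * mode before attaching children or enabling the hierarchy; once children
 * exist or the tree is enabled, a mode change is rejected with
 * NIX_ERR_TM_PKT_MODE_MISMATCH.
 */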
roc_nix_tm_node_name_get(struct roc_nix *roc_nix, uint32_t node_id, char *buf,
			 size_t buflen)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node) {
		plt_strlcpy(buf, "???", buflen);
		return NIX_ERR_TM_INVALID_NODE;
	}

	if (node->hw_lvl == NIX_TXSCH_LVL_CNT)
		snprintf(buf, buflen, "SQ_%d", node->id);
	else
		snprintf(buf, buflen, "%s_%d", nix_tm_hwlvl2str(node->hw_lvl),
			 node->hw_id);
roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
	return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
roc_nix_smq_flush(struct roc_nix *roc_nix)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct nix_tm_node *node;

	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* XOFF & flush all SMQs. HRM mandates that all SQs are empty
	 * before an SMQ flush is issued.
	 */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;
		rc = nix_tm_smq_xoff(nix, node, true);
		if (rc)
			plt_err("Failed to disable smq %u, rc=%d", node->hw_id,
				rc);
	}

	/* XON all SMQs */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;
		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc)
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
	}
roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint16_t sq_cnt = nix->nb_tx_queues;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct nix_tm_node *node;
	struct roc_nix_sq *sq;

	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	plt_tm_dbg("Disabling hierarchy on %s", nix->pci_dev->name);

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Enable CGX RXTX to drain pkts */
	if (!roc_nix->io_enabled) {
		/* Although this enables both RX MCAM entries and the CGX
		 * link, we assume all RX queues were stopped long before.
		 */
		mbox_alloc_msg_nix_lf_start_rx(mbox);
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;
		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc)
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
	}
	/* Flush all tx queues */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (!sq)
			continue;
		rc = roc_nix_tm_sq_aura_fc(sq, false);
		if (rc)
			plt_err("Failed to disable sqb aura fc, rc=%d", rc);

		/* Wait for sq entries to be flushed */
		rc = roc_nix_tm_sq_flush_spin(sq);
		if (rc)
			plt_err("Failed to drain sq, rc=%d", rc);
	}
	/* XOFF & flush all SMQs. HRM mandates that all SQs are empty
	 * before an SMQ flush is issued.
	 */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;
		rc = nix_tm_smq_xoff(nix, node, true);
		if (rc)
			plt_err("Failed to disable smq %u, rc=%d", node->hw_id,
				rc);

		node->flags &= ~NIX_TM_NODE_ENABLED;
	}
	/* Verify sanity of all tx queues */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (!sq)
			continue;

		wdata = ((uint64_t)sq->qid << 32);
		regaddr = nix->base + NIX_LF_SQ_OP_STATUS;
		val = roc_atomic64_add_nosync(wdata, (int64_t *)regaddr);
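		/* Decoding below follows the fields this code reads from
		 * NIX_LF_SQ_OP_STATUS: SQB count in bits [15:0], head
		 * offset in bits [25:20] and tail offset in bits [33:28].
		 * An idle SQ holds at most one SQB with head == tail.
		 */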
		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		if (sqb_cnt > 1 || head_off != tail_off ||
		    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
			plt_err("Failed to gracefully flush sq %u", sq->qid);
	}
	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;

	/* Restore cgx state */
	if (!roc_nix->io_enabled) {
		mbox_alloc_msg_nix_lf_stop_rx(mbox);
		rc |= mbox_process(mbox);
	}
roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree tree,
			    bool xmit_enable)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	struct roc_nix_sq *sq;

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		if (nix->tm_tree != tree)
			return -EBUSY;
		return 0;
	}
	plt_tm_dbg("Enabling hierarchy on %s, xmit_ena %u, tree %u",
		   nix->pci_dev->name, xmit_enable, tree);

	/* Free hw resources of other trees */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	tree_mask &= ~BIT(tree);

	rc = nix_tm_free_resources(roc_nix, tree_mask, true);
553 plt_err("failed to free resources of other trees, rc=%d", rc);
557 /* Update active tree before starting to do anything */
560 nix_tm_update_parent_info(nix, tree);
562 rc = nix_tm_alloc_txschq(nix, tree);
564 plt_err("TM failed to alloc tm resources=%d", rc);
568 rc = nix_tm_assign_resources(nix, tree);
570 plt_err("TM failed to assign tm resources=%d", rc);
574 rc = nix_tm_txsch_reg_config(nix, tree);
576 plt_err("TM failed to configure sched registers=%d", rc);
	list = nix_tm_node_list(nix, tree);
	/* Mark all non-leaf nodes as enabled */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			node->flags |= NIX_TM_NODE_ENABLED;
	}
	/* Update SQ Sched Data while SQ is idle */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			continue;
		rc = nix_tm_sq_sched_conf(nix, node, false);
		if (rc)
			plt_err("SQ %u sched update failed, rc=%d", node->id,
				rc);
	}
	/* Finally XON all SMQs */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc)
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
	}
	/* Enable xmit as all the topology is ready */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			continue;
		sq_id = node->id;
		sq = nix->sqs[sq_id];
		rc = roc_nix_tm_sq_aura_fc(sq, true);
		if (rc)
			plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
				rc);
		node->flags |= NIX_TM_NODE_ENABLED;
	}

	nix->tm_flags |= NIX_TM_HIERARCHY_ENA;
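	/* Typical bring-up order with these APIs (illustrative sketch):
	 *
	 *	roc_nix_tm_init(roc_nix);
	 *	roc_nix_tm_shaper_profile_add(roc_nix, &sp);
	 *	roc_nix_tm_node_add(roc_nix, &tn);   (repeat per level)
	 *	roc_nix_tm_hierarchy_enable(roc_nix, ROC_NIX_TM_USER, true);
	 */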
roc_nix_tm_node_suspend_resume(struct roc_nix *roc_nix, uint32_t node_id,
			       bool suspend)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	flags = node->flags;
	flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
			  (flags | NIX_TM_NODE_ENABLED);

	if (node->flags == flags)
		return 0;

	/* Send mbox for state change */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);

	req->lvl = node->hw_lvl;
	req->num_regs =
		nix_tm_sw_xoff_prep(node, suspend, req->reg, req->regval);
	rc = mbox_process(mbox);
roc_nix_tm_prealloc_res(struct roc_nix *roc_nix, uint8_t lvl,
			uint16_t discontig, uint16_t contig)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT)
		return NIX_ERR_PARAM;
	/* Preallocate contiguous */
	if (nix->contig_rsvd[hw_lvl] < contig) {
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		req->schq_contig[hw_lvl] = contig - nix->contig_rsvd[hw_lvl];
		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;
		nix_tm_copy_rsp_to_nix(nix, rsp);
	}
	/* Preallocate discontiguous */
	if (nix->discontig_rsvd[hw_lvl] < discontig) {
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		req->schq[hw_lvl] = discontig - nix->discontig_rsvd[hw_lvl];
		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;
		nix_tm_copy_rsp_to_nix(nix, rsp);
	}
	/* Save thresholds */
	nix->contig_rsvd[hw_lvl] = contig;
	nix->discontig_rsvd[hw_lvl] = discontig;
	/* Release anything present above thresholds */
	nix_tm_release_resources(nix, hw_lvl, true, true);
	nix_tm_release_resources(nix, hw_lvl, false, true);
roc_nix_tm_node_shaper_update(struct roc_nix *roc_nix, uint32_t node_id,
			      uint32_t profile_id, bool force_update)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile = NULL;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *node;

	/* Shaper updates valid only for user nodes */
	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node || nix_tm_is_leaf(nix, node->lvl))
		return NIX_ERR_TM_INVALID_NODE;
	if (profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE) {
		profile = nix_tm_shaper_profile_search(nix, profile_id);
		if (!profile)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
	}

	/* Pkt mode should match existing node's pkt mode */
	if (profile && profile->pkt_mode != node->pkt_mode)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;
	if ((profile_id == node->shaper_profile_id) && !force_update) {
		return 0;
	} else if (profile_id != node->shaper_profile_id) {
		struct nix_tm_shaper_profile *old;

		/* Find old shaper profile and reduce ref count */
		old = nix_tm_shaper_profile_search(nix,
						   node->shaper_profile_id);
		/* Reduce older shaper ref count and increase new one */
		if (old)
			old->ref_cnt--;
		if (profile)
			profile->ref_cnt++;

		node->shaper_profile_id = profile_id;
	}
	/* Nothing to do if hierarchy not yet enabled */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Flush the specific node with SW_XOFF */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = node->hw_lvl;
	k = nix_tm_sw_xoff_prep(node, true, req->reg, req->regval);
	req->num_regs = k;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Update the PIR/CIR and clear SW XOFF */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = node->hw_lvl;
	k = nix_tm_shaper_reg_prep(node, profile, req->reg, req->regval);
	k += nix_tm_sw_xoff_prep(node, false, &req->reg[k], &req->regval[k]);
	req->num_regs = k;

	rc = mbox_process(mbox);
	node->flags |= NIX_TM_NODE_ENABLED;
roc_nix_tm_node_parent_update(struct roc_nix *roc_nix, uint32_t node_id,
			      uint32_t new_parent_id, uint32_t priority,
			      uint32_t weight)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node *new_parent;
	struct nix_txschq_config *req;
	struct nix_tm_node_list *list;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	/* Parent id valid only for non root nodes */
	if (node->hw_lvl != nix->tm_root_lvl) {
		new_parent =
			nix_tm_node_search(nix, new_parent_id, ROC_NIX_TM_USER);
		if (!new_parent)
			return NIX_ERR_TM_INVALID_PARENT;

		/* Current support is only for dynamic weight update */
		if (node->parent != new_parent || node->priority != priority)
			return NIX_ERR_TM_PARENT_PRIO_UPDATE;
	}
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	/* Skip if no change */
	if (node->weight == weight)
		return 0;

	node->weight = weight;

	/* Nothing to do if hierarchy not yet enabled */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;
	/* For leaf nodes, SQ CTX needs update */
	if (nix_tm_is_leaf(nix, node->lvl)) {
		/* Update SQ quantum data on the fly */
		rc = nix_tm_sq_sched_conf(nix, node, true);
		if (rc)
			return NIX_ERR_TM_SQ_UPDATE_FAIL;
	} else {
		/* XOFF parent node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->parent->hw_lvl;
		req->num_regs = nix_tm_sw_xoff_prep(node->parent, true,
						    req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;
		/* XOFF this node and all other siblings */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;
		k = 0;
		TAILQ_FOREACH(sibling, list, node) {
			if (sibling->parent != node->parent)
				continue;
			k += nix_tm_sw_xoff_prep(sibling, true, &req->reg[k],
						 &req->regval[k]);
		}
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			return rc;
		/* Update new weight for current node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;
		req->num_regs = nix_tm_sched_reg_prep(nix, node, req->reg,
						      req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;
		/* XON this node and all other siblings */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;
		k = 0;
		TAILQ_FOREACH(sibling, list, node) {
			if (sibling->parent != node->parent)
				continue;
			k += nix_tm_sw_xoff_prep(sibling, false, &req->reg[k],
						 &req->regval[k]);
		}
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			return rc;
		/* XON parent node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->parent->hw_lvl;
		req->num_regs = nix_tm_sw_xoff_prep(node->parent, false,
						    req->reg, req->regval);
		rc = mbox_process(mbox);
	}
roc_nix_tm_init(struct roc_nix *roc_nix)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		plt_err("Cannot init while existing hierarchy is enabled");
		return -EBUSY;
	}

	/* Free up all user resources already held */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	rc = nix_tm_free_resources(roc_nix, tree_mask, false);
	if (rc) {
		plt_err("Failed to free up all nodes and resources, rc=%d",
			rc);
		return rc;
	}

	/* Prepare default tree */
	rc = nix_tm_prepare_default_tree(roc_nix);
	if (rc) {
		plt_err("Failed to prepare default tm tree, rc=%d", rc);
		return rc;
	}
roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile profile;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *parent;
	volatile uint64_t *reg, *regval;
	struct nix_txschq_config *req;

	if ((nix->tm_tree == ROC_NIX_TM_USER) ||
	    !(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return NIX_ERR_TM_INVALID_TREE;
	node = nix_tm_node_search(nix, qid, nix->tm_tree);

	/* Check if we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID)
		return NIX_ERR_TM_INVALID_NODE;

	parent = node->parent;
	flags = parent->flags;
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_MDQ;
	reg = req->reg;
	regval = req->regval;

	if (rate == 0) {
		k += nix_tm_sw_xoff_prep(parent, true, &reg[k], &regval[k]);
		flags &= ~NIX_TM_NODE_ENABLED;
		goto exit;
	}

	if (!(flags & NIX_TM_NODE_ENABLED)) {
		k += nix_tm_sw_xoff_prep(parent, false, &reg[k], &regval[k]);
		flags |= NIX_TM_NODE_ENABLED;
	}
	/* Use only PIR for rate limit */
	memset(&profile, 0, sizeof(profile));
	profile.peak.rate = rate;
	/* Minimum burst is ~4us worth of Tx bytes */
	profile.peak.size = PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix),
				    (4ul * rate) / ((uint64_t)1E6 * 8));
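	/* Worked example, assuming rate is in bits/sec as the formula
	 * implies: a 10 Gbps limit gives (4 * 10e9) / (1e6 * 8) = 5000
	 * bytes, i.e. ~4us of line rate, unless the max packet length
	 * is larger.
	 */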
	if (!nix->tm_rate_min || nix->tm_rate_min > rate)
		nix->tm_rate_min = rate;

	k += nix_tm_shaper_reg_prep(parent, &profile, &reg[k], &regval[k]);

exit:
	req->num_regs = k;
	rc = mbox_process(mbox);
	parent->flags = flags;
	return rc;
roc_nix_tm_fini(struct roc_nix *roc_nix)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;

	/* Xmit is assumed to be disabled */
	/* Free up resources already held */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	rc = nix_tm_free_resources(roc_nix, tree_mask, false);
	if (rc)
		plt_err("Failed to free up existing nodes or resources, rc=%d",
			rc);

	/* Free all other hw resources */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	req->flags = TXSCHQ_FREE_ALL;
	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to free up all resources, rc=%d", rc);
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		plt_bitmap_reset(nix->schq_bmp[hw_lvl]);
		plt_bitmap_reset(nix->schq_contig_bmp[hw_lvl]);
		nix->contig_rsvd[hw_lvl] = 0;
		nix->discontig_rsvd[hw_lvl] = 0;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(nix);
	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
roc_nix_tm_rsrc_count(struct roc_nix *roc_nix, uint16_t schq[ROC_TM_LVL_MAX])
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct free_rsrcs_rsp *rsp;

	/* Get the current free resources */
	mbox_alloc_msg_free_rsrc_cnt(mbox);
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	for (i = 0; i < ROC_TM_LVL_MAX; i++) {
		hw_lvl = nix_tm_lvl2nix(nix, i);
		if (hw_lvl == NIX_TXSCH_LVL_CNT)
			continue;

		schq[i] = (nix->is_nix1 ? rsp->schq_nix1[hw_lvl] :
					  rsp->schq[hw_lvl]);
	}
roc_nix_tm_rsrc_max(bool pf, uint16_t schq[ROC_TM_LVL_MAX])
	for (i = 0; i < ROC_TM_LVL_MAX; i++) {
		hw_lvl = pf ? nix_tm_lvl2nix_tl1_root(i) :
			      nix_tm_lvl2nix_tl2_root(i);

		switch (hw_lvl) {
		case NIX_TXSCH_LVL_SMQ:
			max = (roc_model_is_cn9k() ?
				       NIX_CN9K_TXSCH_LVL_SMQ_MAX :
				       NIX_TXSCH_LVL_SMQ_MAX);
			break;
		case NIX_TXSCH_LVL_TL4:
			max = NIX_TXSCH_LVL_TL4_MAX;
			break;
		case NIX_TXSCH_LVL_TL3:
			max = NIX_TXSCH_LVL_TL3_MAX;
			break;
		case NIX_TXSCH_LVL_TL2:
			max = pf ? NIX_TXSCH_LVL_TL2_MAX : 1;
			break;
		case NIX_TXSCH_LVL_TL1:
			max = pf ? 1 : 0; /* TL1 root only for PF (assumed) */
			break;
roc_nix_tm_root_has_sp(struct roc_nix *roc_nix)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->tm_flags & NIX_TM_TL1_NO_SP)
		return false;
	return true;