/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

int
roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
{
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	uint64_t aura_handle;
	struct npa_lf *lf;
	struct mbox *mbox;
	int rc = -ENOSPC;

	plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
		   enable ? "enable" : "disable");

	lf = idev_npa_obj_get();
	if (!lf)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	mbox = lf->mbox;
	/* Set/clear sqb aura fc_ena */
	aura_handle = sq->aura_handle;
	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return rc;

	req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;
	/* Not needed for aura writes, but the AF driver expects it;
	 * the AF translates it to the associated pool context.
	 */
	req->aura.pool_addr = req->aura_id;

	req->aura.fc_ena = enable;
	req->aura_mask.fc_ena = 1;

	rc = mbox_process(mbox);
	if (rc)
		return rc;
	/* Read back the NPA aura ctx */
	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return -ENOSPC;

	req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Init the fc count when enabling, as there might be no triggers */
	if (enable)
		*(volatile uint64_t *)sq->fc = rsp->aura.count;
	else
		*(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;

	/* Sync write barrier */
	plt_wmb();
	return 0;
}
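/* Example (illustrative sketch, not part of the original sources): draining
 * an SQ typically pairs roc_nix_tm_sq_aura_fc() with the flush-spin helper
 * used by the hierarchy disable path below. The function name is
 * hypothetical.
 */
static inline int
example_sq_drain(struct roc_nix_sq *sq)
{
	int rc;

	/* Stop SQB aura backpressure accounting so the SQ can empty */
	rc = roc_nix_tm_sq_aura_fc(sq, false);
	if (rc)
		return rc;

	/* Busy-wait until all SQ entries are flushed */
	return roc_nix_tm_sq_flush_spin(sq);
}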
int
roc_nix_tm_free_resources(struct roc_nix *roc_nix, bool hw_only)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA)
		return -EBUSY;

	return nix_tm_free_resources(roc_nix, BIT(ROC_NIX_TM_USER), hw_only);
}
static int
nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
			  struct nix_tm_shaper_profile *profile, int skip_ins)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint64_t commit_rate, commit_sz;
	uint64_t peak_rate, peak_sz;
	uint32_t id;

	id = profile->id;
	commit_rate = profile->commit.rate;
	commit_sz = profile->commit.size;
	peak_rate = profile->peak.rate;
	peak_sz = profile->peak.size;

	if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
		return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;
	if (profile->pkt_len_adj < NIX_TM_LENGTH_ADJUST_MIN ||
	    profile->pkt_len_adj > NIX_TM_LENGTH_ADJUST_MAX)
		return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

	/* We cannot support both pkt length adjust and pkt mode */
	if (profile->pkt_mode && profile->pkt_len_adj)
		return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

	/* Commit rate and burst size can be enabled/disabled */
	if (commit_rate || commit_sz) {
		if (commit_sz < NIX_TM_MIN_SHAPER_BURST ||
		    commit_sz > NIX_TM_MAX_SHAPER_BURST)
			return NIX_ERR_TM_INVALID_COMMIT_SZ;
		else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL,
						  NULL))
			return NIX_ERR_TM_INVALID_COMMIT_RATE;
	}
	/* Peak rate and burst size can be enabled/disabled */
	if (peak_sz || peak_rate) {
		if (peak_sz < NIX_TM_MIN_SHAPER_BURST ||
		    peak_sz > NIX_TM_MAX_SHAPER_BURST)
			return NIX_ERR_TM_INVALID_PEAK_SZ;
		else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL))
			return NIX_ERR_TM_INVALID_PEAK_RATE;
	}
	TAILQ_INSERT_TAIL(&nix->shaper_profile_list, profile, shaper);

	plt_tm_dbg("Added TM shaper profile %u, pir %" PRIu64
		   ", pbs %" PRIu64 ", cir %" PRIu64 ", cbs %" PRIu64
		   ", adj %u, pkt_mode %u",
		   id, profile->peak.rate, profile->peak.size,
		   profile->commit.rate, profile->commit.size,
		   profile->pkt_len_adj, profile->pkt_mode);

	/* Always use PIR for single rate shaping */
	if (!peak_rate && commit_rate) {
		profile->peak.rate = profile->commit.rate;
		profile->peak.size = profile->commit.size;
		profile->commit.rate = 0;
		profile->commit.size = 0;
	}

	/* Update min rate */
	nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
	return 0;
}
int
roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
			      struct roc_nix_tm_shaper_profile *roc_profile)
{
	struct nix_tm_shaper_profile *profile;

	profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

	profile->ref_cnt = 0;
	profile->id = roc_profile->id;
	if (roc_profile->pkt_mode) {
		/* Each packet accumulates a single count, whereas HW
		 * accounts in bytes, so convert the user's packets/s
		 * rate into the equivalent bits/s.
		 */
		profile->commit.rate = roc_profile->commit_rate * 8;
		profile->peak.rate = roc_profile->peak_rate * 8;
	} else {
		profile->commit.rate = roc_profile->commit_rate;
		profile->peak.rate = roc_profile->peak_rate;
	}
	profile->commit.size = roc_profile->commit_sz;
	profile->peak.size = roc_profile->peak_sz;
	profile->pkt_len_adj = roc_profile->pkt_len_adj;
	profile->pkt_mode = roc_profile->pkt_mode;
	profile->free_fn = roc_profile->free_fn;

	return nix_tm_shaper_profile_add(roc_nix, profile, 0);
}
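/* Example (illustrative sketch, not part of the original sources): creating
 * a dual-rate, byte-mode shaper profile. The helper name and values are
 * hypothetical; rates are in bits/s and burst sizes in bytes, matching the
 * fields consumed above. The profile memory backs the internal context, so
 * it must outlive the profile; static storage is used here for simplicity.
 */
static inline int
example_shaper_profile_create(struct roc_nix *roc_nix, uint32_t id)
{
	static struct roc_nix_tm_shaper_profile prof;

	memset(&prof, 0, sizeof(prof));
	prof.id = id;
	prof.commit_rate = 100000000UL;	 /* 100 Mbps committed */
	prof.commit_sz = 4096;		 /* committed burst, bytes */
	prof.peak_rate = 1000000000UL;	 /* 1 Gbps peak */
	prof.peak_sz = 8192;		 /* peak burst, bytes */
	prof.pkt_mode = false;		 /* byte-based accounting */

	return roc_nix_tm_shaper_profile_add(roc_nix, &prof);
}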
int
roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
				 struct roc_nix_tm_shaper_profile *roc_profile)
{
	struct nix_tm_shaper_profile *profile;

	profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

	if (roc_profile->pkt_mode) {
		/* Each packet accumulates a single count, whereas HW
		 * accounts in bytes, so convert the user's packets/s
		 * rate into the equivalent bits/s.
		 */
		profile->commit.rate = roc_profile->commit_rate * 8;
		profile->peak.rate = roc_profile->peak_rate * 8;
	} else {
		profile->commit.rate = roc_profile->commit_rate;
		profile->peak.rate = roc_profile->peak_rate;
	}
	profile->commit.size = roc_profile->commit_sz;
	profile->peak.size = roc_profile->peak_sz;

	return nix_tm_shaper_profile_add(roc_nix, profile, 1);
}
int
roc_nix_tm_shaper_profile_delete(struct roc_nix *roc_nix, uint32_t id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, id);
	if (!profile)
		return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

	if (profile->ref_cnt)
		return NIX_ERR_TM_SHAPER_PROFILE_IN_USE;

	plt_tm_dbg("Removing TM shaper profile %u", id);
	TAILQ_REMOVE(&nix->shaper_profile_list, profile, shaper);
	nix_tm_shaper_profile_free(profile);

	/* Update min rate */
	nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
	return 0;
}
int
roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
{
	struct nix_tm_node *node;

	node = (struct nix_tm_node *)&roc_node->reserved;
	node->id = roc_node->id;
	node->priority = roc_node->priority;
	node->weight = roc_node->weight;
	node->lvl = roc_node->lvl;
	node->parent_id = roc_node->parent_id;
	node->shaper_profile_id = roc_node->shaper_profile_id;
	node->pkt_mode = roc_node->pkt_mode;
	node->pkt_mode_set = roc_node->pkt_mode_set;
	node->free_fn = roc_node->free_fn;
	node->tree = ROC_NIX_TM_USER;

	return nix_tm_node_add(roc_nix, node);
}
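/* Example (illustrative sketch, not part of the original sources): adding a
 * user node beneath an existing parent. Helper name and ids are
 * hypothetical; lvl must be a valid ROC TM level for the active tree.
 * As with shaper profiles, the roc node backs the internal node context and
 * must outlive it, hence the static storage (one-shot, for illustration).
 */
static inline int
example_tm_node_create(struct roc_nix *roc_nix, uint32_t id,
		       uint32_t parent_id, uint32_t lvl, uint32_t profile_id)
{
	static struct roc_nix_tm_node node;

	memset(&node, 0, sizeof(node));
	node.id = id;
	node.parent_id = parent_id;
	node.lvl = lvl;
	node.priority = 0;		/* no static priority */
	node.weight = 1;		/* DWRR weight */
	node.shaper_profile_id = profile_id;
	node.pkt_mode = false;

	return roc_nix_tm_node_add(roc_nix, &node);
}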
int
roc_nix_tm_node_pkt_mode_update(struct roc_nix *roc_nix, uint32_t node_id,
				bool pkt_mode)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node, *child;
	struct nix_tm_node_list *list;
	int num_children = 0;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->pkt_mode == pkt_mode) {
		node->pkt_mode_set = true;
		return 0;
	}

	/* Check for existing children; if there are any, we cannot
	 * update the pkt mode, as the children's quantum values have
	 * already been accounted for.
	 */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(child, list, node) {
		if (child->parent == node)
			num_children++;
	}

	/* Cannot update mode if it has children or the tree is enabled */
	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) && num_children)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	if (node->pkt_mode_set && num_children)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	node->pkt_mode = pkt_mode;
	node->pkt_mode_set = true;

	return 0;
}
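/* Example (illustrative sketch, not part of the original sources): per the
 * checks above, packet-mode scheduling must be selected before any children
 * are attached and before the hierarchy is enabled. Helper name is
 * hypothetical.
 */
static inline int
example_node_set_pkt_mode(struct roc_nix *roc_nix, uint32_t node_id)
{
	/* Switch quantum accounting from bytes to packets for this node */
	return roc_nix_tm_node_pkt_mode_update(roc_nix, node_id, true);
}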
int
roc_nix_tm_node_name_get(struct roc_nix *roc_nix, uint32_t node_id, char *buf,
			 size_t buflen)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node) {
		plt_strlcpy(buf, "???", buflen);
		return NIX_ERR_TM_INVALID_NODE;
	}

	if (node->hw_lvl == NIX_TXSCH_LVL_CNT)
		snprintf(buf, buflen, "SQ_%d", node->id);
	else
		snprintf(buf, buflen, "%s_%d", nix_tm_hwlvl2str(node->hw_lvl),
			 node->hw_id);
	return 0;
}

int
roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
{
	return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
}
int
roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint16_t sq_cnt = nix->nb_tx_queues;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct nix_tm_node *node;
	struct roc_nix_sq *sq;
	uint64_t wdata, val;
	uintptr_t regaddr;
	int rc = -1, i;

	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	plt_tm_dbg("Disabling hierarchy on %s", nix->pci_dev->name);

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Enable CGX RXTX to drain pkts */
	if (!roc_nix->io_enabled) {
		/* Although this enables both the RX MCAM entries and the
		 * CGX link, we assume all the RX queues were stopped long
		 * before this point.
		 */
		mbox_alloc_msg_nix_lf_start_rx(mbox);
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}
	/* XON all SMQs */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;

		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc) {
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
			goto cleanup;
		}
	}
	/* Flush all tx queues */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (sq == NULL)
			continue;

		rc = roc_nix_tm_sq_aura_fc(sq, false);
		if (rc) {
			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
			goto cleanup;
		}

		/* Wait for sq entries to be flushed */
		rc = roc_nix_tm_sq_flush_spin(sq);
		if (rc) {
			plt_err("Failed to drain sq, rc=%d\n", rc);
			return rc;
		}
	}
	/* XOFF & flush all SMQs. The HRM mandates that all SQs be
	 * empty before an SMQ flush is issued.
	 */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;

		rc = nix_tm_smq_xoff(nix, node, true);
		if (rc) {
			plt_err("Failed to disable smq %u, rc=%d",
				node->hw_id, rc);
			goto cleanup;
		}

		node->flags &= ~NIX_TM_NODE_ENABLED;
	}
	/* Verify sanity of all tx queues */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (sq == NULL)
			continue;

		wdata = ((uint64_t)sq->qid << 32);
		regaddr = nix->base + NIX_LF_SQ_OP_STATUS;
		val = roc_atomic64_add_nosync(wdata, (int64_t *)regaddr);

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		if (sqb_cnt > 1 || head_off != tail_off ||
		    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
			plt_err("Failed to gracefully flush sq %u", sq->qid);
	}
	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
cleanup:
	/* Restore cgx state */
	if (!roc_nix->io_enabled) {
		mbox_alloc_msg_nix_lf_stop_rx(mbox);
		rc |= mbox_process(mbox);
	}

	return rc;
}
int
roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree tree,
			    bool xmit_enable)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	struct roc_nix_sq *sq;
	uint32_t tree_mask;
	uint16_t sq_id;
	int rc;

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		if (nix->tm_tree != tree)
			return -EBUSY;
		return 0;
	}

	plt_tm_dbg("Enabling hierarchy on %s, xmit_ena %u, tree %u",
		   nix->pci_dev->name, xmit_enable, tree);
	/* Free hw resources of other trees */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	tree_mask &= ~BIT(tree);

	rc = nix_tm_free_resources(roc_nix, tree_mask, true);
	if (rc) {
		plt_err("failed to free resources of other trees, rc=%d", rc);
		return rc;
	}

	/* Update active tree before starting to do anything */
	nix->tm_tree = tree;

	nix_tm_update_parent_info(nix, tree);

	rc = nix_tm_alloc_txschq(nix, tree);
	if (rc) {
		plt_err("TM failed to alloc txsch resources, rc=%d", rc);
		return rc;
	}

	rc = nix_tm_assign_resources(nix, tree);
	if (rc) {
		plt_err("TM failed to assign txsch resources, rc=%d", rc);
		return rc;
	}

	rc = nix_tm_txsch_reg_config(nix, tree);
	if (rc) {
		plt_err("TM failed to configure sched registers, rc=%d", rc);
		return rc;
	}
	list = nix_tm_node_list(nix, tree);
	/* Mark all non-leaf nodes as enabled */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			node->flags |= NIX_TM_NODE_ENABLED;
	}

	if (!xmit_enable)
		goto skip_sq_update;
	/* Update SQ sched data while the SQ is idle */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			continue;

		rc = nix_tm_sq_sched_conf(nix, node, false);
		if (rc) {
			plt_err("SQ %u sched update failed, rc=%d", node->id,
				rc);
			return rc;
		}
	}
	/* Finally XON all SMQs */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;

		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc) {
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
			return rc;
		}
	}
	/* Enable xmit now that the whole topology is ready */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			continue;

		sq_id = node->id;
		sq = nix->sqs[sq_id];

		rc = roc_nix_tm_sq_aura_fc(sq, true);
		if (rc) {
			plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
				rc);
			return rc;
		}
		node->flags |= NIX_TM_NODE_ENABLED;
	}

skip_sq_update:
	nix->tm_flags |= NIX_TM_HIERARCHY_ENA;
	return 0;
}
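/* Example (illustrative sketch, not part of the original sources): a
 * typical bring-up order for a user-defined hierarchy, matching the
 * constraints enforced above: profiles and nodes first, then a single
 * enable with xmit. Error handling is trimmed and the earlier example_*
 * helpers in this file are hypothetical.
 */
static inline int
example_tm_user_tree_start(struct roc_nix *roc_nix)
{
	int rc;

	rc = example_shaper_profile_create(roc_nix, 1);
	if (rc)
		return rc;

	/* ... example_tm_node_create() calls for root/mid/leaf nodes ... */

	return roc_nix_tm_hierarchy_enable(roc_nix, ROC_NIX_TM_USER, true);
}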
int
roc_nix_tm_node_suspend_resume(struct roc_nix *roc_nix, uint32_t node_id,
			       bool suspend)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *node;
	uint16_t flags;
	int rc;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	flags = node->flags;
	flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
			  (flags | NIX_TM_NODE_ENABLED);

	if (node->flags == flags)
		return 0;

	/* Send mbox for state change */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);

	req->lvl = node->hw_lvl;
	req->num_regs =
		nix_tm_sw_xoff_prep(node, suspend, req->reg, req->regval);
	rc = mbox_process(mbox);
	if (!rc)
		node->flags = flags;
	return rc;
}
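/* Example (illustrative sketch, not part of the original sources): pausing
 * and resuming one user node via the SW_XOFF mechanism above. Helper name
 * is hypothetical.
 */
static inline int
example_node_pause(struct roc_nix *roc_nix, uint32_t node_id, bool pause)
{
	return roc_nix_tm_node_suspend_resume(roc_nix, node_id, pause);
}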
int
roc_nix_tm_prealloc_res(struct roc_nix *roc_nix, uint8_t lvl,
			uint16_t discontig, uint16_t contig)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT)
		return -EINVAL;

	/* Preallocate contiguous */
	if (nix->contig_rsvd[hw_lvl] < contig) {
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (req == NULL)
			return rc;
		req->schq_contig[hw_lvl] = contig - nix->contig_rsvd[hw_lvl];

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		nix_tm_copy_rsp_to_nix(nix, rsp);
	}

	/* Preallocate discontiguous */
	if (nix->discontig_rsvd[hw_lvl] < discontig) {
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (req == NULL)
			return rc;
		req->schq[hw_lvl] = discontig - nix->discontig_rsvd[hw_lvl];

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		nix_tm_copy_rsp_to_nix(nix, rsp);
	}

	/* Save thresholds */
	nix->contig_rsvd[hw_lvl] = contig;
	nix->discontig_rsvd[hw_lvl] = discontig;
	/* Release anything present above the thresholds */
	nix_tm_release_resources(nix, hw_lvl, true, true);
	nix_tm_release_resources(nix, hw_lvl, false, true);

	return 0;
}
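/* Example (illustrative sketch, not part of the original sources):
 * reserving scheduling queues ahead of time so a later hierarchy enable
 * avoids extra AF mbox round-trips. Helper name and counts are
 * hypothetical; lvl is a ROC TM level valid for this LF.
 */
static inline int
example_tm_reserve_schq(struct roc_nix *roc_nix, uint8_t lvl)
{
	/* Reserve 4 discontiguous and 2 contiguous schqs at this level */
	return roc_nix_tm_prealloc_res(roc_nix, lvl, 4, 2);
}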
int
roc_nix_tm_node_shaper_update(struct roc_nix *roc_nix, uint32_t node_id,
			      uint32_t profile_id, bool force_update)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile = NULL;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *node;
	uint8_t k;
	int rc;

	/* Shaper updates are valid only for user nodes */
	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node || nix_tm_is_leaf(nix, node->lvl))
		return NIX_ERR_TM_INVALID_NODE;

	if (profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE) {
		profile = nix_tm_shaper_profile_search(nix, profile_id);
		if (!profile)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
	}

	/* Pkt mode should match the existing node's pkt mode */
	if (profile && profile->pkt_mode != node->pkt_mode)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	if ((profile_id == node->shaper_profile_id) && !force_update) {
		return 0;
	} else if (profile_id != node->shaper_profile_id) {
		struct nix_tm_shaper_profile *old;

		/* Find the old shaper profile and reduce its ref count */
		old = nix_tm_shaper_profile_search(nix,
						   node->shaper_profile_id);
		if (old)
			old->ref_cnt--;

		/* Attach the new profile and increase its ref count */
		if (profile)
			profile->ref_cnt++;
		node->shaper_profile_id = profile_id;
	}

	/* Nothing more to do if the hierarchy is not yet enabled */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Flush the specific node with SW_XOFF */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = node->hw_lvl;
	k = nix_tm_sw_xoff_prep(node, true, req->reg, req->regval);
	req->num_regs = k;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Update the PIR/CIR and clear SW_XOFF */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = node->hw_lvl;

	k = nix_tm_shaper_reg_prep(node, profile, req->reg, req->regval);

	k += nix_tm_sw_xoff_prep(node, false, &req->reg[k], &req->regval[k]);

	req->num_regs = k;
	rc = mbox_process(mbox);
	if (!rc)
		node->flags |= NIX_TM_NODE_ENABLED;
	return rc;
}
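/* Example (illustrative sketch, not part of the original sources): swapping
 * the shaper on a live node. Per the checks above, the new profile must
 * match the node's pkt_mode, and ROC_NIX_TM_SHAPER_PROFILE_NONE detaches
 * shaping. Helper name is hypothetical.
 */
static inline int
example_node_swap_shaper(struct roc_nix *roc_nix, uint32_t node_id,
			 uint32_t new_profile_id)
{
	/* force_update=false: no-op if the same profile is already bound */
	return roc_nix_tm_node_shaper_update(roc_nix, node_id, new_profile_id,
					     false);
}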
int
roc_nix_tm_node_parent_update(struct roc_nix *roc_nix, uint32_t node_id,
			      uint32_t new_parent_id, uint32_t priority,
			      uint32_t weight)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node *new_parent;
	struct nix_txschq_config *req;
	struct nix_tm_node_list *list;
	uint8_t k;
	int rc;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	/* Parent id is valid only for non-root nodes */
	if (node->hw_lvl != nix->tm_root_lvl) {
		new_parent =
			nix_tm_node_search(nix, new_parent_id, ROC_NIX_TM_USER);
		if (!new_parent)
			return NIX_ERR_TM_INVALID_PARENT;

		/* Current support is only for dynamic weight update */
		if (node->parent != new_parent || node->priority != priority)
			return NIX_ERR_TM_PARENT_PRIO_UPDATE;
	}
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	/* Skip if no change */
	if (node->weight == weight)
		return 0;

	node->weight = weight;

	/* Nothing to do if the hierarchy is not yet enabled */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	/* For leaf nodes, the SQ CTX needs an update */
	if (nix_tm_is_leaf(nix, node->lvl)) {
		/* Update SQ quantum data on the fly */
		rc = nix_tm_sq_sched_conf(nix, node, true);
		if (rc)
			return NIX_ERR_TM_SQ_UPDATE_FAIL;
	} else {
		/* XOFF the parent node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->parent->hw_lvl;
		req->num_regs = nix_tm_sw_xoff_prep(node->parent, true,
						    req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;
		/* XOFF this node and all of its siblings */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;

		k = 0;
		TAILQ_FOREACH(sibling, list, node) {
			if (sibling->parent != node->parent)
				continue;
			k += nix_tm_sw_xoff_prep(sibling, true, &req->reg[k],
						 &req->regval[k]);
		}
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			return rc;
		/* Update the new weight for the current node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;
		req->num_regs =
			nix_tm_sched_reg_prep(nix, node, req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;
		/* XON this node and all of its siblings */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;

		k = 0;
		TAILQ_FOREACH(sibling, list, node) {
			if (sibling->parent != node->parent)
				continue;
			k += nix_tm_sw_xoff_prep(sibling, false, &req->reg[k],
						 &req->regval[k]);
		}
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			return rc;
		/* XON the parent node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->parent->hw_lvl;
		req->num_regs = nix_tm_sw_xoff_prep(node->parent, false,
						    req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;
	}
	return 0;
}
int
roc_nix_tm_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t tree_mask;
	int rc;

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		plt_err("Cannot init while an existing hierarchy is enabled");
		return -EBUSY;
	}

	/* Free up all user resources already held */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	rc = nix_tm_free_resources(roc_nix, tree_mask, false);
	if (rc) {
		plt_err("Failed to free up all nodes and resources, rc=%d", rc);
		return rc;
	}

	/* Prepare the default tree */
	rc = nix_tm_prepare_default_tree(roc_nix);
	if (rc) {
		plt_err("failed to prepare default tm tree, rc=%d", rc);
		return rc;
	}

	/* Prepare the rlimit tree */
	rc = nix_tm_prepare_rate_limited_tree(roc_nix);
	if (rc) {
		plt_err("failed to prepare rlimit tm tree, rc=%d", rc);
		return rc;
	}

	return rc;
}
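/* Example (illustrative sketch, not part of the original sources): the
 * expected init/fini pairing. roc_nix_tm_init() builds the default and
 * rlimit trees; roc_nix_tm_fini() below assumes xmit is already disabled.
 * Helper name is hypothetical.
 */
static inline int
example_tm_lifecycle(struct roc_nix *roc_nix)
{
	int rc;

	rc = roc_nix_tm_init(roc_nix);
	if (rc)
		return rc;

	/* ... hierarchy setup and datapath run here ... */

	return roc_nix_tm_fini(roc_nix);
}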
int
roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile profile;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *parent;
	volatile uint64_t *reg, *regval;
	struct nix_txschq_config *req;
	uint16_t flags;
	uint8_t k = 0;
	int rc;

	if (nix->tm_tree != ROC_NIX_TM_RLIMIT ||
	    !(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return NIX_ERR_TM_INVALID_TREE;

	node = nix_tm_node_search(nix, qid, ROC_NIX_TM_RLIMIT);

	/* Check if we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID)
		return NIX_ERR_TM_INVALID_NODE;

	parent = node->parent;
	flags = parent->flags;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_MDQ;
	reg = req->reg;
	regval = req->regval;

	if (rate == 0) {
		k += nix_tm_sw_xoff_prep(parent, true, &reg[k], &regval[k]);
		flags &= ~NIX_TM_NODE_ENABLED;
		goto exit;
	}

	if (!(flags & NIX_TM_NODE_ENABLED)) {
		k += nix_tm_sw_xoff_prep(parent, false, &reg[k], &regval[k]);
		flags |= NIX_TM_NODE_ENABLED;
	}

	/* Use only the PIR for rate limiting */
	memset(&profile, 0, sizeof(profile));
	profile.peak.rate = rate;
	/* Minimum burst of ~4us worth of Tx bytes */
	profile.peak.size = PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix),
				    (4ul * rate) / ((uint64_t)1E6 * 8));
	if (!nix->tm_rate_min || nix->tm_rate_min > rate)
		nix->tm_rate_min = rate;

	k += nix_tm_shaper_reg_prep(parent, &profile, &reg[k], &regval[k]);
exit:
	req->num_regs = k;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	parent->flags = flags;
	return 0;
}
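/* Example (illustrative sketch, not part of the original sources): rate
 * limiting one TX queue to 500 Mbps. This requires the rlimit tree to be
 * the active, enabled hierarchy; the rate unit is bits/s, as implied by
 * the burst computation above. Helper name is hypothetical.
 */
static inline int
example_sq_rate_limit(struct roc_nix *roc_nix, uint16_t qid)
{
	return roc_nix_tm_rlimit_sq(roc_nix, qid, 500000000UL);
}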
int
roc_nix_tm_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	uint32_t tree_mask;
	uint8_t hw_lvl;
	int rc;

	/* Xmit is assumed to be disabled */
	/* Free up resources already held */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	rc = nix_tm_free_resources(roc_nix, tree_mask, false);
	if (rc)
		plt_err("Failed to free up existing nodes or rsrcs, rc=%d", rc);

	/* Free all other hw resources */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return -ENOSPC;

	req->flags = TXSCHQ_FREE_ALL;
	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to free up all txsch resources, rc=%d", rc);

	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		plt_bitmap_reset(nix->schq_bmp[hw_lvl]);
		plt_bitmap_reset(nix->schq_contig_bmp[hw_lvl]);
		nix->contig_rsvd[hw_lvl] = 0;
		nix->discontig_rsvd[hw_lvl] = 0;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(nix);
	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
	return rc;
}
int
roc_nix_tm_rsrc_count(struct roc_nix *roc_nix, uint16_t schq[ROC_TM_LVL_MAX])
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct free_rsrcs_rsp *rsp;
	uint8_t hw_lvl;
	int rc, i;

	/* Get the current free resources */
	mbox_alloc_msg_free_rsrc_cnt(mbox);
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	for (i = 0; i < ROC_TM_LVL_MAX; i++) {
		hw_lvl = nix_tm_lvl2nix(nix, i);
		if (hw_lvl == NIX_TXSCH_LVL_CNT)
			continue;

		schq[i] = (nix->is_nix1 ? rsp->schq_nix1[hw_lvl] :
					  rsp->schq[hw_lvl]);
	}

	return 0;
}
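/* Example (illustrative sketch, not part of the original sources): dumping
 * the free scheduling-queue counts per ROC TM level before sizing a
 * hierarchy. Helper name is hypothetical.
 */
static inline int
example_tm_dump_free_schq(struct roc_nix *roc_nix)
{
	uint16_t schq[ROC_TM_LVL_MAX];
	int rc, i;

	rc = roc_nix_tm_rsrc_count(roc_nix, schq);
	if (rc)
		return rc;

	for (i = 0; i < ROC_TM_LVL_MAX; i++)
		plt_tm_dbg("lvl %d: %u free schq", i, schq[i]);
	return 0;
}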
void
roc_nix_tm_rsrc_max(bool pf, uint16_t schq[ROC_TM_LVL_MAX])
{
	uint8_t hw_lvl, i;
	uint16_t max;

	for (i = 0; i < ROC_TM_LVL_MAX; i++) {
		hw_lvl = pf ? nix_tm_lvl2nix_tl1_root(i) :
			      nix_tm_lvl2nix_tl2_root(i);

		switch (hw_lvl) {
		case NIX_TXSCH_LVL_SMQ:
			max = (roc_model_is_cn9k() ?
				       NIX_CN9K_TXSCH_LVL_SMQ_MAX :
				       NIX_TXSCH_LVL_SMQ_MAX);
			break;
		case NIX_TXSCH_LVL_TL4:
			max = NIX_TXSCH_LVL_TL4_MAX;
			break;
		case NIX_TXSCH_LVL_TL3:
			max = NIX_TXSCH_LVL_TL3_MAX;
			break;
		case NIX_TXSCH_LVL_TL2:
			max = pf ? NIX_TXSCH_LVL_TL2_MAX : 1;
			break;
		case NIX_TXSCH_LVL_TL1:
			max = pf ? 1 : 0;
			break;
		default:
			max = 0;
			break;
		}

		schq[i] = max;
	}
}
bool
roc_nix_tm_root_has_sp(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->tm_flags & NIX_TM_TL1_NO_SP)
		return false;
	return true;
}