/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

int
roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
{
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	uint64_t aura_handle;
	struct npa_lf *lf;
	struct mbox *mbox;
	int rc = -ENOSPC;

	plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
		   enable ? "enable" : "disable");

	lf = idev_npa_obj_get();
	if (!lf)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	mbox = lf->mbox;
	/* Set/clear sqb aura fc_ena */
	aura_handle = sq->aura_handle;
	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return rc;

	req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;
	/* Below is not needed for aura writes but AF driver needs it */
	/* AF will translate to associated poolctx */
	req->aura.pool_addr = req->aura_id;

	req->aura.fc_ena = enable;
	req->aura_mask.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0()) {
		req->aura.fc_stype = 0x0;      /* STF */
		req->aura_mask.fc_stype = 0x0; /* STF */
	} else {
		req->aura.fc_stype = 0x3;      /* STSTP */
		req->aura_mask.fc_stype = 0x3; /* STSTP */
	}

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read back npa aura ctx */
	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return -ENOSPC;

	req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Init when enabled as there might be no triggers */
	if (enable)
		*(volatile uint64_t *)sq->fc = rsp->aura.count;
	else
		*(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
	/* Sync write barrier */
	plt_wmb();

	return 0;
}
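
/*
 * Illustrative sketch only, not part of the driver: callers are expected to
 * turn SQB aura flow control off before draining an SQ and back on once the
 * queue is active again, as roc_nix_tm_hierarchy_disable()/_enable() below
 * do. The helper name is hypothetical.
 */
static inline int
example_sq_drain(struct roc_nix_sq *sq)
{
	int rc;

	/* Stop HW from refreshing the aura count at sq->fc */
	rc = roc_nix_tm_sq_aura_fc(sq, false);
	if (rc)
		return rc;

	/* Spin until pending SQEs are flushed out of the SQ */
	return roc_nix_tm_sq_flush_spin(sq);
}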

int
roc_nix_tm_free_resources(struct roc_nix *roc_nix, bool hw_only)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA)
		return -EBUSY;

	return nix_tm_free_resources(roc_nix, BIT(ROC_NIX_TM_USER), hw_only);
}

static int
nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
			  struct nix_tm_shaper_profile *profile, int skip_ins)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint64_t commit_rate, commit_sz;
	uint64_t peak_rate, peak_sz;
	uint32_t id;

	id = profile->id;
	commit_rate = profile->commit.rate;
	commit_sz = profile->commit.size;
	peak_rate = profile->peak.rate;
	peak_sz = profile->peak.size;

	if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
		return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;

	if (profile->pkt_len_adj < NIX_TM_LENGTH_ADJUST_MIN ||
	    profile->pkt_len_adj > NIX_TM_LENGTH_ADJUST_MAX)
		return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

	/* We cannot support both pkt length adjust and pkt mode */
	if (profile->pkt_mode && profile->pkt_len_adj)
		return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

	/* Commit rate and burst size can be enabled/disabled */
	if (commit_rate || commit_sz) {
		if (commit_sz < NIX_TM_MIN_SHAPER_BURST ||
		    commit_sz > NIX_TM_MAX_SHAPER_BURST)
			return NIX_ERR_TM_INVALID_COMMIT_SZ;
		else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL,
						  NULL))
			return NIX_ERR_TM_INVALID_COMMIT_RATE;
	}

	/* Peak rate and burst size can be enabled/disabled */
	if (peak_sz || peak_rate) {
		if (peak_sz < NIX_TM_MIN_SHAPER_BURST ||
		    peak_sz > NIX_TM_MAX_SHAPER_BURST)
			return NIX_ERR_TM_INVALID_PEAK_SZ;
		else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL))
			return NIX_ERR_TM_INVALID_PEAK_RATE;
	}

	if (!skip_ins)
		TAILQ_INSERT_TAIL(&nix->shaper_profile_list, profile, shaper);

	plt_tm_dbg("Added TM shaper profile %u, pir %" PRIu64 ", pbs %" PRIu64
		   ", cir %" PRIu64 ", cbs %" PRIu64 ", adj %u, pkt_mode %u",
		   id, profile->peak.rate, profile->peak.size,
		   profile->commit.rate, profile->commit.size,
		   profile->pkt_len_adj, profile->pkt_mode);

	/* Always use PIR for single rate shaping */
	if (!peak_rate && commit_rate) {
		profile->peak.rate = profile->commit.rate;
		profile->peak.size = profile->commit.size;
		profile->commit.rate = 0;
		profile->commit.size = 0;
	}

	/* Update min rate */
	nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);

	return 0;
}

int
roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
			      struct roc_nix_tm_shaper_profile *roc_profile)
{
	struct nix_tm_shaper_profile *profile;

	profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

	profile->ref_cnt = 0;
	profile->id = roc_profile->id;
	if (roc_profile->pkt_mode) {
		/* Each packet accumulates a single count, whereas HW
		 * considers each unit as a byte, so we need to convert
		 * user pps to bps
		 */
		profile->commit.rate = roc_profile->commit_rate * 8;
		profile->peak.rate = roc_profile->peak_rate * 8;
	} else {
		profile->commit.rate = roc_profile->commit_rate;
		profile->peak.rate = roc_profile->peak_rate;
	}
	profile->commit.size = roc_profile->commit_sz;
	profile->peak.size = roc_profile->peak_sz;
	profile->pkt_len_adj = roc_profile->pkt_len_adj;
	profile->pkt_mode = roc_profile->pkt_mode;
	profile->free_fn = roc_profile->free_fn;

	return nix_tm_shaper_profile_add(roc_nix, profile, 0);
}
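
/*
 * Illustrative sketch only, not part of the driver: filling a
 * roc_nix_tm_shaper_profile before registering it. The helper and the
 * parameter values are hypothetical; in pkt_mode the same rate fields are
 * interpreted as packets per second and converted above by the * 8 step.
 */
static inline int
example_profile_add(struct roc_nix *roc_nix,
		    struct roc_nix_tm_shaper_profile *prof, uint64_t rate,
		    uint64_t burst)
{
	memset(prof, 0, sizeof(*prof));
	prof->id = 1;		/* arbitrary profile id */
	prof->peak_rate = rate;	/* single rate shaper: PIR only */
	prof->peak_sz = burst;
	prof->pkt_mode = 0;	/* byte accounting; 1 shapes on packets */

	return roc_nix_tm_shaper_profile_add(roc_nix, prof);
}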

int
roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
				 struct roc_nix_tm_shaper_profile *roc_profile)
{
	struct nix_tm_shaper_profile *profile;

	profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

	if (roc_profile->pkt_mode) {
		/* Each packet accumulates a single count, whereas HW
		 * considers each unit as a byte, so we need to convert
		 * user pps to bps
		 */
		profile->commit.rate = roc_profile->commit_rate * 8;
		profile->peak.rate = roc_profile->peak_rate * 8;
	} else {
		profile->commit.rate = roc_profile->commit_rate;
		profile->peak.rate = roc_profile->peak_rate;
	}
	profile->commit.size = roc_profile->commit_sz;
	profile->peak.size = roc_profile->peak_sz;

	return nix_tm_shaper_profile_add(roc_nix, profile, 1);
}

int
roc_nix_tm_shaper_profile_delete(struct roc_nix *roc_nix, uint32_t id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, id);
	if (!profile)
		return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

	if (profile->ref_cnt)
		return NIX_ERR_TM_SHAPER_PROFILE_IN_USE;

	plt_tm_dbg("Removing TM shaper profile %u", id);
	TAILQ_REMOVE(&nix->shaper_profile_list, profile, shaper);
	nix_tm_shaper_profile_free(profile);

	/* Update min rate */
	nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);

	return 0;
}

int
roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
{
	struct nix_tm_node *node;

	node = (struct nix_tm_node *)&roc_node->reserved;
	node->id = roc_node->id;
	node->priority = roc_node->priority;
	node->weight = roc_node->weight;
	node->lvl = roc_node->lvl;
	node->parent_id = roc_node->parent_id;
	node->shaper_profile_id = roc_node->shaper_profile_id;
	node->pkt_mode = roc_node->pkt_mode;
	node->pkt_mode_set = roc_node->pkt_mode_set;
	node->free_fn = roc_node->free_fn;
	node->tree = ROC_NIX_TM_USER;

	return nix_tm_node_add(roc_nix, node);
}
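
/*
 * Illustrative sketch only, not part of the driver: a minimal user tree
 * with one root and one SQ leaf. Ids are arbitrary; a real hierarchy
 * typically also has the intermediate ROC_TM_LVL_SCH* levels between
 * root and queue.
 */
static inline int
example_two_level_tree(struct roc_nix *roc_nix, struct roc_nix_tm_node *root,
		       struct roc_nix_tm_node *leaf, uint16_t qid)
{
	int rc;

	memset(root, 0, sizeof(*root));
	root->id = 0x10000;	/* any id that cannot clash with SQ ids */
	root->parent_id = ROC_NIX_TM_NODE_ID_INVALID;
	root->lvl = ROC_TM_LVL_ROOT;
	root->weight = 1;
	root->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
	rc = roc_nix_tm_node_add(roc_nix, root);
	if (rc)
		return rc;

	memset(leaf, 0, sizeof(*leaf));
	leaf->id = qid;		/* leaf node id is the SQ qid */
	leaf->parent_id = root->id;
	leaf->lvl = ROC_TM_LVL_QUEUE;
	leaf->weight = 1;
	leaf->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;

	return roc_nix_tm_node_add(roc_nix, leaf);
}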

int
roc_nix_tm_node_pkt_mode_update(struct roc_nix *roc_nix, uint32_t node_id,
				bool pkt_mode)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node, *child;
	struct nix_tm_node_list *list;
	int num_children = 0;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->pkt_mode == pkt_mode) {
		node->pkt_mode_set = true;
		return 0;
	}

	/* Check for any existing children; if there are any, we cannot
	 * update the pkt mode, as the children's quantum values were
	 * already computed against the current mode.
	 */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(child, list, node) {
		if (child->parent == node)
			num_children++;
	}

	/* Cannot update mode if it has children or tree is enabled */
	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) && num_children)
		return -EBUSY;

	if (node->pkt_mode_set && num_children)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	node->pkt_mode = pkt_mode;
	node->pkt_mode_set = true;

	return 0;
}

int
roc_nix_tm_node_name_get(struct roc_nix *roc_nix, uint32_t node_id, char *buf,
			 size_t buflen)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node) {
		plt_strlcpy(buf, "???", buflen);
		return NIX_ERR_TM_INVALID_NODE;
	}

	if (node->hw_lvl == NIX_TXSCH_LVL_CNT)
		snprintf(buf, buflen, "SQ_%d", node->id);
	else
		snprintf(buf, buflen, "%s_%d", nix_tm_hwlvl2str(node->hw_lvl),
			 node->hw_id);

	return 0;
}

int
roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
{
	return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
}

int
roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint16_t sq_cnt = nix->nb_tx_queues;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct nix_tm_node *node;
	struct roc_nix_sq *sq;
	uint64_t wdata, val;
	uintptr_t regaddr;
	int rc = -1, i;

	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	plt_tm_dbg("Disabling hierarchy on %s", nix->pci_dev->name);

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Enable CGX RXTX to drain pkts */
	if (!roc_nix->io_enabled) {
		/* Though it enables both RX MCAM Entries and CGX Link
		 * we assume all the rx queues are stopped way back.
		 */
		mbox_alloc_msg_nix_lf_start_rx(mbox);
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}

	/* XON all SMQs so that they can drain */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;

		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc) {
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
			goto cleanup;
		}
	}

	/* Flush all tx queues */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (!sq)
			continue;

		rc = roc_nix_tm_sq_aura_fc(sq, false);
		if (rc) {
			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
			goto cleanup;
		}

		/* Wait for sq entries to be flushed */
		rc = roc_nix_tm_sq_flush_spin(sq);
		if (rc) {
			plt_err("Failed to drain sq, rc=%d", rc);
			goto cleanup;
		}
	}

	/* XOFF & flush all SMQs. HRM mandates that all SQs be
	 * empty before an SMQ flush is issued.
	 */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;

		rc = nix_tm_smq_xoff(nix, node, true);
		if (rc) {
			plt_err("Failed to disable smq %u, rc=%d",
				node->hw_id, rc);
			goto cleanup;
		}

		node->flags &= ~NIX_TM_NODE_ENABLED;
	}

	/* Verify sanity of all tx queues */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (!sq)
			continue;

		wdata = ((uint64_t)sq->qid << 32);
		regaddr = nix->base + NIX_LF_SQ_OP_STATUS;
		val = roc_atomic64_add_nosync(wdata, (int64_t *)regaddr);

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		if (sqb_cnt > 1 || head_off != tail_off ||
		    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
			plt_err("Failed to gracefully flush sq %u", sq->qid);
	}

	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
cleanup:
	/* Restore cgx state */
	if (!roc_nix->io_enabled) {
		mbox_alloc_msg_nix_lf_stop_rx(mbox);
		rc |= mbox_process(mbox);
	}

	return rc;
}
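
/*
 * Illustrative sketch only, not part of the driver: the SQ status word read
 * back above via the atomic add on NIX_LF_SQ_OP_STATUS packs the SQB count
 * and head/tail offsets; a gracefully flushed SQ holds at most one SQB with
 * equal head and tail offsets. Field positions mirror the decode above.
 */
static inline bool
example_sq_is_drained(uint64_t status)
{
	uint16_t sqb_cnt = status & 0xFFFF;
	uint16_t head_off = (status >> 20) & 0x3F;
	uint16_t tail_off = (status >> 28) & 0x3F;

	return sqb_cnt <= 1 && head_off == tail_off;
}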

int
roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree tree,
			    bool xmit_enable)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	struct roc_nix_sq *sq;
	uint32_t tree_mask;
	uint16_t sq_id;
	int rc;

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		if (nix->tm_tree != tree)
			return -EBUSY;
		return 0;
	}

	plt_tm_dbg("Enabling hierarchy on %s, xmit_ena %u, tree %u",
		   nix->pci_dev->name, xmit_enable, tree);

	/* Free hw resources of other trees */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	tree_mask &= ~BIT(tree);

	rc = nix_tm_free_resources(roc_nix, tree_mask, true);
	if (rc) {
		plt_err("failed to free resources of other trees, rc=%d", rc);
		return rc;
	}

	/* Update active tree before starting to do anything */
	nix->tm_tree = tree;

	nix_tm_update_parent_info(nix, tree);

	rc = nix_tm_alloc_txschq(nix, tree);
	if (rc) {
		plt_err("TM failed to alloc tm resources=%d", rc);
		return rc;
	}

	rc = nix_tm_assign_resources(nix, tree);
	if (rc) {
		plt_err("TM failed to assign tm resources=%d", rc);
		return rc;
	}

	rc = nix_tm_txsch_reg_config(nix, tree);
	if (rc) {
		plt_err("TM failed to configure sched registers=%d", rc);
		return rc;
	}

	list = nix_tm_node_list(nix, tree);
	/* Mark all non-leaf nodes as enabled */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			node->flags |= NIX_TM_NODE_ENABLED;
	}

	if (!xmit_enable)
		goto skip_sq_update;

	/* Update SQ Sched Data while SQ is idle */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			continue;

		rc = nix_tm_sq_sched_conf(nix, node, false);
		if (rc)
			plt_err("SQ %u sched update failed, rc=%d", node->id,
				rc);
	}

	/* Finally XON all SMQs */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;

		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc) {
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
			return rc;
		}
	}

	/* Enable xmit as all the topology is ready */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			continue;

		sq_id = node->id;
		sq = nix->sqs[sq_id];

		rc = roc_nix_tm_sq_aura_fc(sq, true);
		if (rc) {
			plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
				rc);
			return rc;
		}
		node->flags |= NIX_TM_NODE_ENABLED;
	}

skip_sq_update:
	nix->tm_flags |= NIX_TM_HIERARCHY_ENA;
	return 0;
}
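
/*
 * Illustrative sketch only, not part of the driver: the usual bring-up
 * order is roc_nix_tm_init() (prepares the default and rlimit trees)
 * followed by enabling one tree with xmit enabled.
 */
static inline int
example_tm_bringup(struct roc_nix *roc_nix)
{
	int rc;

	rc = roc_nix_tm_init(roc_nix);
	if (rc)
		return rc;

	return roc_nix_tm_hierarchy_enable(roc_nix, ROC_NIX_TM_DEFAULT, true);
}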

int
roc_nix_tm_node_suspend_resume(struct roc_nix *roc_nix, uint32_t node_id,
			       bool suspend)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *node;
	uint16_t flags;
	int rc;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	flags = node->flags;
	flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
			  (flags | NIX_TM_NODE_ENABLED);

	if (node->flags == flags)
		return 0;

	/* Send mbox for state change */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);

	req->lvl = node->hw_lvl;
	req->num_regs =
		nix_tm_sw_xoff_prep(node, suspend, req->reg, req->regval);
	rc = mbox_process(mbox);
	if (!rc)
		node->flags = flags;

	return rc;
}
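
/*
 * Illustrative sketch only, not part of the driver: suspend programs
 * SW_XOFF on the node's scheduler queue and resume clears it; a request
 * that matches the current state returns 0 without a mailbox round trip.
 */
static inline int
example_node_pause(struct roc_nix *roc_nix, uint32_t node_id, bool pause)
{
	return roc_nix_tm_node_suspend_resume(roc_nix, node_id, pause);
}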

int
roc_nix_tm_prealloc_res(struct roc_nix *roc_nix, uint8_t lvl,
			uint16_t discontig, uint16_t contig)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT)
		return -EINVAL;

	/* Preallocate contiguous */
	if (nix->contig_rsvd[hw_lvl] < contig) {
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (req == NULL)
			return rc;
		req->schq_contig[hw_lvl] = contig - nix->contig_rsvd[hw_lvl];

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		nix_tm_copy_rsp_to_nix(nix, rsp);
	}

	/* Preallocate discontiguous */
	if (nix->discontig_rsvd[hw_lvl] < discontig) {
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (req == NULL)
			return -ENOSPC;
		req->schq[hw_lvl] = discontig - nix->discontig_rsvd[hw_lvl];

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		nix_tm_copy_rsp_to_nix(nix, rsp);
	}

	/* Save thresholds */
	nix->contig_rsvd[hw_lvl] = contig;
	nix->discontig_rsvd[hw_lvl] = discontig;
	/* Release anything present above thresholds */
	nix_tm_release_resources(nix, hw_lvl, true, true);
	nix_tm_release_resources(nix, hw_lvl, false, true);

	return 0;
}
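
/*
 * Illustrative sketch only, not part of the driver: pre-reserving TX
 * scheduler queues for one level ahead of a hierarchy commit so the
 * later alloc path finds them already held. The counts are examples.
 */
static inline int
example_prealloc(struct roc_nix *roc_nix)
{
	/* Reserve 4 discontiguous and 2 contiguous queues at SCH1 */
	return roc_nix_tm_prealloc_res(roc_nix, ROC_TM_LVL_SCH1, 4, 2);
}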

int
roc_nix_tm_node_shaper_update(struct roc_nix *roc_nix, uint32_t node_id,
			      uint32_t profile_id, bool force_update)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile = NULL;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *node;
	uint8_t k;
	int rc;

	/* Shaper updates valid only for user nodes */
	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node || nix_tm_is_leaf(nix, node->lvl))
		return NIX_ERR_TM_INVALID_NODE;

	if (profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE) {
		profile = nix_tm_shaper_profile_search(nix, profile_id);
		if (!profile)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
	}

	/* Pkt mode should match existing node's pkt mode */
	if (profile && profile->pkt_mode != node->pkt_mode)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	if ((profile_id == node->shaper_profile_id) && !force_update) {
		return 0;
	} else if (profile_id != node->shaper_profile_id) {
		struct nix_tm_shaper_profile *old;

		/* Find old shaper profile and reduce ref count */
		old = nix_tm_shaper_profile_search(nix,
						   node->shaper_profile_id);

		/* Reduce older shaper ref count and increase new one */
		if (old)
			old->ref_cnt--;
		if (profile)
			profile->ref_cnt++;

		node->shaper_profile_id = profile_id;
	}

	/* Nothing to do if hierarchy not yet enabled */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Flush the specific node with SW_XOFF */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = node->hw_lvl;
	k = nix_tm_sw_xoff_prep(node, true, req->reg, req->regval);
	req->num_regs = k;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Update the PIR/CIR and clear SW XOFF */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = node->hw_lvl;

	k = nix_tm_shaper_reg_prep(node, profile, req->reg, req->regval);

	k += nix_tm_sw_xoff_prep(node, false, &req->reg[k], &req->regval[k]);

	req->num_regs = k;
	rc = mbox_process(mbox);
	if (!rc)
		node->flags |= NIX_TM_NODE_ENABLED;

	return rc;
}
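
/*
 * Illustrative sketch only, not part of the driver: detaching a shaper
 * from a node at runtime by passing ROC_NIX_TM_SHAPER_PROFILE_NONE.
 * Passing force_update = true instead re-programs PIR/CIR even when the
 * profile id is unchanged.
 */
static inline int
example_shaper_detach(struct roc_nix *roc_nix, uint32_t node_id)
{
	return roc_nix_tm_node_shaper_update(roc_nix, node_id,
					     ROC_NIX_TM_SHAPER_PROFILE_NONE,
					     false);
}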

int
roc_nix_tm_node_parent_update(struct roc_nix *roc_nix, uint32_t node_id,
			      uint32_t new_parent_id, uint32_t priority,
			      uint32_t weight)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node *new_parent;
	struct nix_txschq_config *req;
	struct nix_tm_node_list *list;
	uint8_t k;
	int rc;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	/* Parent id valid only for non root nodes */
	if (node->hw_lvl != nix->tm_root_lvl) {
		new_parent =
			nix_tm_node_search(nix, new_parent_id, ROC_NIX_TM_USER);
		if (!new_parent)
			return NIX_ERR_TM_INVALID_PARENT;

		/* Current support is only for dynamic weight update */
		if (node->parent != new_parent || node->priority != priority)
			return NIX_ERR_TM_PARENT_PRIO_UPDATE;
	}

	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	/* Skip if no change */
	if (node->weight == weight)
		return 0;

	node->weight = weight;

	/* Nothing to do if hierarchy not yet enabled */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	/* For leaf nodes, SQ CTX needs update */
	if (nix_tm_is_leaf(nix, node->lvl)) {
		/* Update SQ quantum data on the fly */
		rc = nix_tm_sq_sched_conf(nix, node, true);
		if (rc)
			return NIX_ERR_TM_SQ_UPDATE_FAIL;
	} else {
		/* XOFF Parent node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->parent->hw_lvl;
		req->num_regs = nix_tm_sw_xoff_prep(node->parent, true,
						    req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;

		/* XOFF this node and all other siblings */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;

		k = 0;
		TAILQ_FOREACH(sibling, list, node) {
			if (sibling->parent != node->parent)
				continue;
			k += nix_tm_sw_xoff_prep(sibling, true, &req->reg[k],
						 &req->regval[k]);
		}
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			return rc;

		/* Update new weight for current node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;
		req->num_regs =
			nix_tm_sched_reg_prep(nix, node, req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;

		/* XON this node and all other siblings */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;

		k = 0;
		TAILQ_FOREACH(sibling, list, node) {
			if (sibling->parent != node->parent)
				continue;
			k += nix_tm_sw_xoff_prep(sibling, false, &req->reg[k],
						 &req->regval[k]);
		}
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			return rc;

		/* XON Parent node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->parent->hw_lvl;
		req->num_regs = nix_tm_sw_xoff_prep(node->parent, false,
						    req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;
	}

	return 0;
}
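
/*
 * Illustrative sketch only, not part of the driver: only the weight may
 * change at runtime, so parent id and priority must be passed back
 * unchanged or NIX_ERR_TM_PARENT_PRIO_UPDATE is returned.
 */
static inline int
example_weight_update(struct roc_nix *roc_nix, struct roc_nix_tm_node *n,
		      uint32_t new_weight)
{
	return roc_nix_tm_node_parent_update(roc_nix, n->id, n->parent_id,
					     n->priority, new_weight);
}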

int
roc_nix_tm_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t tree_mask;
	int rc;

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		plt_err("Cannot init while existing hierarchy is enabled");
		return -EBUSY;
	}

	/* Free up all user resources already held */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	rc = nix_tm_free_resources(roc_nix, tree_mask, false);
	if (rc) {
		plt_err("Failed to freeup all nodes and resources, rc=%d", rc);
		return rc;
	}

	/* Prepare default tree */
	rc = nix_tm_prepare_default_tree(roc_nix);
	if (rc) {
		plt_err("failed to prepare default tm tree, rc=%d", rc);
		return rc;
	}

	/* Prepare rlimit tree */
	rc = nix_tm_prepare_rate_limited_tree(roc_nix);
	if (rc) {
		plt_err("failed to prepare rlimit tm tree, rc=%d", rc);
		return rc;
	}

	return 0;
}

int
roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile profile;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *parent;
	volatile uint64_t *reg, *regval;
	struct nix_txschq_config *req;
	uint16_t flags;
	uint8_t k = 0;
	int rc;

	if (nix->tm_tree != ROC_NIX_TM_RLIMIT ||
	    !(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return NIX_ERR_TM_INVALID_TREE;

	node = nix_tm_node_search(nix, qid, ROC_NIX_TM_RLIMIT);

	/* Check if we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID)
		return NIX_ERR_TM_INVALID_NODE;

	parent = node->parent;
	flags = parent->flags;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_MDQ;
	reg = req->reg;
	regval = req->regval;

	if (rate == 0) {
		k += nix_tm_sw_xoff_prep(parent, true, &reg[k], &regval[k]);
		flags &= ~NIX_TM_NODE_ENABLED;
		goto exit;
	}

	if (!(flags & NIX_TM_NODE_ENABLED)) {
		k += nix_tm_sw_xoff_prep(parent, false, &reg[k], &regval[k]);
		flags |= NIX_TM_NODE_ENABLED;
	}

	/* Use only PIR for rate limit */
	memset(&profile, 0, sizeof(profile));
	profile.peak.rate = rate;
	/* Minimum burst of ~4 us worth of Tx bytes */
	profile.peak.size = PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix),
				    (4ul * rate) / ((uint64_t)1E6 * 8));
	if (!nix->tm_rate_min || nix->tm_rate_min > rate)
		nix->tm_rate_min = rate;

	k += nix_tm_shaper_reg_prep(parent, &profile, &reg[k], &regval[k]);
exit:
	req->num_regs = k;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	parent->flags = flags;

	return 0;
}
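
/*
 * Illustrative sketch only, not part of the driver: the minimum burst
 * computed above is ~4 us worth of bytes at the requested rate, e.g. a
 * 10 Gbps limit gives (4 * 10e9) / (1e6 * 8) = 5000 bytes, clamped up
 * to the max packet length.
 */
static inline uint64_t
example_rlimit_burst(uint64_t rate_bps, uint64_t max_pkt_len)
{
	uint64_t burst = (4ul * rate_bps) / ((uint64_t)1E6 * 8);

	return PLT_MAX(max_pkt_len, burst);
}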

void
roc_nix_tm_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	uint32_t tree_mask;
	uint8_t hw_lvl;
	int rc;

	/* Xmit is assumed to be disabled */
	/* Free up resources already held */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	rc = nix_tm_free_resources(roc_nix, tree_mask, false);
	if (rc)
		plt_err("Failed to freeup existing nodes or rsrcs, rc=%d", rc);

	/* Free all other hw resources */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return;

	req->flags = TXSCHQ_FREE_ALL;
	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to freeup all res, rc=%d", rc);

	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		plt_bitmap_reset(nix->schq_bmp[hw_lvl]);
		plt_bitmap_reset(nix->schq_contig_bmp[hw_lvl]);
		nix->contig_rsvd[hw_lvl] = 0;
		nix->discontig_rsvd[hw_lvl] = 0;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(nix);

	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
}

int
roc_nix_tm_rsrc_count(struct roc_nix *roc_nix, uint16_t schq[ROC_TM_LVL_MAX])
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct free_rsrcs_rsp *rsp;
	uint8_t hw_lvl, i;
	int rc;

	/* Get the current free resources */
	mbox_alloc_msg_free_rsrc_cnt(mbox);
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	for (i = 0; i < ROC_TM_LVL_MAX; i++) {
		hw_lvl = nix_tm_lvl2nix(nix, i);
		if (hw_lvl == NIX_TXSCH_LVL_CNT)
			continue;

		schq[i] = (nix->is_nix1 ? rsp->schq_nix1[hw_lvl] :
					  rsp->schq[hw_lvl]);
	}

	return 0;
}

void
roc_nix_tm_rsrc_max(bool pf, uint16_t schq[ROC_TM_LVL_MAX])
{
	uint8_t hw_lvl, i;
	uint16_t max;

	for (i = 0; i < ROC_TM_LVL_MAX; i++) {
		hw_lvl = pf ? nix_tm_lvl2nix_tl1_root(i) :
			      nix_tm_lvl2nix_tl2_root(i);

		switch (hw_lvl) {
		case NIX_TXSCH_LVL_SMQ:
			max = (roc_model_is_cn9k() ?
				       NIX_CN9K_TXSCH_LVL_SMQ_MAX :
				       NIX_TXSCH_LVL_SMQ_MAX);
			break;
		case NIX_TXSCH_LVL_TL4:
			max = NIX_TXSCH_LVL_TL4_MAX;
			break;
		case NIX_TXSCH_LVL_TL3:
			max = NIX_TXSCH_LVL_TL3_MAX;
			break;
		case NIX_TXSCH_LVL_TL2:
			max = pf ? NIX_TXSCH_LVL_TL2_MAX : 1;
			break;
		case NIX_TXSCH_LVL_TL1:
			/* TL1 is the root of the PF hierarchy */
			max = pf ? 1 : 0;
			break;
		default:
			max = 0;
			break;
		}
		schq[i] = max;
	}
}

bool
roc_nix_tm_root_has_sp(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->tm_flags & NIX_TM_TL1_NO_SP)
		return false;

	return true;
}