1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Enable/disable HW flow control on the SQ's SQB aura via the NPA admin
 * queue, then read the aura context back to (re)seed the SQ's cached fc
 * count.  NOTE(review): this view of the file is sampled; some original
 * lines (braces, error checks) are missing, so control flow between the
 * statements below is partly inferred.
 */
9 roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
11 struct npa_aq_enq_req *req;
12 struct npa_aq_enq_rsp *rsp;
18 plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
19 enable ? "enable" : "disable");
/* Obtain the NPA LF shared via the idev; fails if none is bound */
21 lf = idev_npa_obj_get();
23 return NPA_ERR_DEVICE_NOT_BOUNDED;
26 /* Set/clear sqb aura fc_ena */
27 aura_handle = sq->aura_handle;
28 req = mbox_alloc_msg_npa_aq_enq(mbox);
32 req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
33 req->ctype = NPA_AQ_CTYPE_AURA;
34 req->op = NPA_AQ_INSTOP_WRITE;
35 /* Below is not needed for aura writes but AF driver needs it */
36 /* AF will translate to associated poolctx */
37 req->aura.pool_addr = req->aura_id;
39 req->aura.fc_ena = enable;
40 req->aura_mask.fc_ena = 1;
/* CN9K (and errata-affected parts) lack the STSTP stype; fall back to STF */
41 if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp()) {
42 req->aura.fc_stype = 0x0; /* STF */
43 req->aura_mask.fc_stype = 0x0; /* STF */
45 req->aura.fc_stype = 0x3; /* STSTP */
46 req->aura_mask.fc_stype = 0x3; /* STSTP */
49 rc = mbox_process(mbox);
53 /* Read back npa aura ctx */
54 req = mbox_alloc_msg_npa_aq_enq(mbox);
58 req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
59 req->ctype = NPA_AQ_CTYPE_AURA;
60 req->op = NPA_AQ_INSTOP_READ;
62 rc = mbox_process_msg(mbox, (void *)&rsp);
66 /* Init when enabled as there might be no triggers */
68 *(volatile uint64_t *)sq->fc = rsp->aura.count;
/* Disabled case: seed fc with the full SQB count */
70 *(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
71 /* Sync write barrier */
/* Free user-tree TM resources (nodes and/or only HW schq when hw_only).
 * Refused while the hierarchy is still enabled in hardware.
 */
77 roc_nix_tm_free_resources(struct roc_nix *roc_nix, bool hw_only)
79 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
/* Cannot free resources out from under an enabled hierarchy */
81 if (nix->tm_flags & NIX_TM_HIERARCHY_ENA)
84 return nix_tm_free_resources(roc_nix, BIT(ROC_NIX_TM_USER), hw_only);
/* For packet-mode (PPS) shaper profiles: validate the PPS range, convert
 * the rates into the byte-based units the HW shaper expects (x8), and if
 * the resulting rate is below the HW minimum, scale rate/burst up via the
 * pkt_mode_adj (length-adjust) factor so each packet counts for more.
 */
90 uint64_t min_rate = profile->commit.rate;
/* Byte-mode profiles need no PPS adjustment */
92 if (!profile->pkt_mode)
95 profile->pkt_mode_adj = 1;
97 if (profile->commit.rate &&
98 (profile->commit.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
99 profile->commit.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
100 return NIX_ERR_TM_INVALID_COMMIT_RATE;
102 if (profile->peak.rate &&
103 (profile->peak.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
104 profile->peak.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
105 return NIX_ERR_TM_INVALID_PEAK_RATE;
/* min_rate tracks the smaller of the two enabled rates */
107 if (profile->peak.rate && min_rate > profile->peak.rate)
108 min_rate = profile->peak.rate;
110 /* Each packet accumulate single count, whereas HW
111 * considers each unit as Byte, so we need convert
114 profile->commit.rate = profile->commit.rate * 8;
115 profile->peak.rate = profile->peak.rate * 8;
116 min_rate = min_rate * 8;
118 if (min_rate && (min_rate < NIX_TM_MIN_SHAPER_RATE)) {
/* Integer scale factor needed to lift min_rate into HW range */
119 int adjust = NIX_TM_MIN_SHAPER_RATE / min_rate;
/* Length-adjust field is bounded; reject rates too small to scale */
121 if (adjust > NIX_TM_LENGTH_ADJUST_MAX)
122 return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
124 profile->pkt_mode_adj += adjust;
125 profile->commit.rate += (adjust * profile->commit.rate);
126 profile->peak.rate += (adjust * profile->peak.rate);
127 /* Number of tokens freed after scheduling was proportional
130 profile->commit.size *= adjust;
131 profile->peak.size *= adjust;
/* Validate a shaper profile and insert it into the per-LF profile list.
 * skip_ins != 0 means the profile is already in the list (update path):
 * the duplicate-id check and the TAILQ insert are skipped.
 * NOTE(review): sampled view — the skip_ins guard around the insert and
 * some closing braces are not visible here.
 */
138 nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
139 struct nix_tm_shaper_profile *profile, int skip_ins)
141 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
142 uint64_t commit_rate, commit_sz;
143 uint64_t min_burst, max_burst;
144 uint64_t peak_rate, peak_sz;
/* PPS profiles are converted to byte units before range checks below */
149 rc = nix_tm_adjust_shaper_pps_rate(profile);
153 commit_rate = profile->commit.rate;
154 commit_sz = profile->commit.size;
155 peak_rate = profile->peak.rate;
156 peak_sz = profile->peak.size;
158 min_burst = NIX_TM_MIN_SHAPER_BURST;
159 max_burst = roc_nix_tm_max_shaper_burst_get();
161 if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
162 return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;
164 if (profile->pkt_len_adj < NIX_TM_LENGTH_ADJUST_MIN ||
165 profile->pkt_len_adj > NIX_TM_LENGTH_ADJUST_MAX)
166 return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
168 /* We cannot support both pkt length adjust and pkt mode */
169 if (profile->pkt_mode && profile->pkt_len_adj)
170 return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
172 /* commit rate and burst size can be enabled/disabled */
173 if (commit_rate || commit_sz) {
174 if (commit_sz < min_burst || commit_sz > max_burst)
175 return NIX_ERR_TM_INVALID_COMMIT_SZ;
176 else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL, NULL,
178 return NIX_ERR_TM_INVALID_COMMIT_RATE;
181 /* Peak rate and burst size can be enabled/disabled */
182 if (peak_sz || peak_rate) {
183 if (peak_sz < min_burst || peak_sz > max_burst)
184 return NIX_ERR_TM_INVALID_PEAK_SZ;
185 else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL,
187 return NIX_ERR_TM_INVALID_PEAK_RATE;
190 /* If PIR and CIR are requested, PIR should always be larger than CIR */
191 if (peak_rate && commit_rate && (commit_rate > peak_rate))
192 return NIX_ERR_TM_INVALID_PEAK_RATE;
195 TAILQ_INSERT_TAIL(&nix->shaper_profile_list, profile, shaper);
197 plt_tm_dbg("Added TM shaper profile %u, "
198 " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
199 ", cbs %" PRIu64 " , adj %u, pkt_mode %u",
200 id, profile->peak.rate, profile->peak.size,
201 profile->commit.rate, profile->commit.size,
202 profile->pkt_len_adj, profile->pkt_mode);
204 /* Always use PIR for single rate shaping */
205 if (!peak_rate && commit_rate) {
206 profile->peak.rate = profile->commit.rate;
207 profile->peak.size = profile->commit.size;
208 profile->commit.rate = 0;
209 profile->commit.size = 0;
212 /* update min rate */
213 nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
/* Public API: copy caller-visible profile fields into the internal
 * nix_tm_shaper_profile embedded in roc_profile->reserved and insert it
 * (skip_ins = 0, i.e. duplicate ids are rejected).
 */
218 roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
219 struct roc_nix_tm_shaper_profile *roc_profile)
221 struct nix_tm_shaper_profile *profile;
/* Internal profile lives inside the ROC-visible object's reserved area */
223 profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;
225 profile->ref_cnt = 0;
226 profile->id = roc_profile->id;
227 profile->commit.rate = roc_profile->commit_rate;
228 profile->peak.rate = roc_profile->peak_rate;
229 profile->commit.size = roc_profile->commit_sz;
230 profile->peak.size = roc_profile->peak_sz;
231 profile->pkt_len_adj = roc_profile->pkt_len_adj;
232 profile->pkt_mode = roc_profile->pkt_mode;
233 profile->free_fn = roc_profile->free_fn;
234 profile->accuracy = roc_profile->accuracy;
236 return nix_tm_shaper_profile_add(roc_nix, profile, 0);
/* Public API: update an already-inserted profile in place (skip_ins = 1,
 * so the duplicate-id check and list insert are skipped).  Note ref_cnt,
 * id, pkt_mode and free_fn are intentionally not touched here.
 */
240 roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
241 struct roc_nix_tm_shaper_profile *roc_profile)
243 struct nix_tm_shaper_profile *profile;
245 profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;
247 profile->commit.rate = roc_profile->commit_rate;
248 profile->peak.rate = roc_profile->peak_rate;
249 profile->commit.size = roc_profile->commit_sz;
250 profile->peak.size = roc_profile->peak_sz;
251 profile->pkt_len_adj = roc_profile->pkt_len_adj;
252 profile->accuracy = roc_profile->accuracy;
254 return nix_tm_shaper_profile_add(roc_nix, profile, 1);
/* Remove and free a shaper profile by id.  Fails if the id is unknown or
 * the profile is still referenced by a node (ref_cnt != 0).
 */
258 roc_nix_tm_shaper_profile_delete(struct roc_nix *roc_nix, uint32_t id)
260 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
261 struct nix_tm_shaper_profile *profile;
263 profile = nix_tm_shaper_profile_search(nix, id);
265 return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
/* Still attached to one or more TM nodes */
267 if (profile->ref_cnt)
268 return NIX_ERR_TM_SHAPER_PROFILE_IN_USE;
270 plt_tm_dbg("Removing TM shaper profile %u", id);
271 TAILQ_REMOVE(&nix->shaper_profile_list, profile, shaper);
272 nix_tm_shaper_profile_free(profile);
274 /* update min rate */
275 nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
/* Public API: populate the internal nix_tm_node embedded in the ROC node's
 * reserved area and add it to the user tree (ROC_NIX_TM_USER).
 */
280 roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
282 struct nix_tm_node *node;
284 node = (struct nix_tm_node *)&roc_node->reserved;
285 node->id = roc_node->id;
286 node->priority = roc_node->priority;
287 node->weight = roc_node->weight;
288 node->lvl = roc_node->lvl;
289 node->parent_id = roc_node->parent_id;
290 node->shaper_profile_id = roc_node->shaper_profile_id;
291 node->pkt_mode = roc_node->pkt_mode;
292 node->pkt_mode_set = roc_node->pkt_mode_set;
293 node->free_fn = roc_node->free_fn;
/* User-created nodes always live in the USER tree */
294 node->tree = ROC_NIX_TM_USER;
296 return nix_tm_node_add(roc_nix, node);
/* Switch a user-tree node between byte mode and packet mode.  Disallowed
 * once the node has children (their quantum was computed under the old
 * mode) or once the hierarchy is enabled with children present.
 */
300 roc_nix_tm_node_pkt_mode_update(struct roc_nix *roc_nix, uint32_t node_id,
303 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
304 struct nix_tm_node *node, *child;
305 struct nix_tm_node_list *list;
306 int num_children = 0;
308 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
310 return NIX_ERR_TM_INVALID_NODE;
/* No-op if the requested mode is already set; just latch the flag */
312 if (node->pkt_mode == pkt_mode) {
313 node->pkt_mode_set = true;
317 /* Check for any existing children, if there are any,
318 * then we cannot update the pkt mode as children's quantum
319 * are already taken in.
321 list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
322 TAILQ_FOREACH(child, list, node) {
323 if (child->parent == node)
327 /* Cannot update mode if it has children or tree is enabled */
328 if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) && num_children)
331 if (node->pkt_mode_set && num_children)
332 return NIX_ERR_TM_PKT_MODE_MISMATCH;
334 node->pkt_mode = pkt_mode;
335 node->pkt_mode_set = true;
/* Render a human-readable name ("SQ_<id>" for leaves, "<lvl>_<id>"
 * otherwise) for a user-tree node into buf.  On lookup failure buf is
 * set to "???" and an error is returned.
 */
341 roc_nix_tm_node_name_get(struct roc_nix *roc_nix, uint32_t node_id, char *buf,
344 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
345 struct nix_tm_node *node;
347 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
349 plt_strlcpy(buf, "???", buflen);
350 return NIX_ERR_TM_INVALID_NODE;
/* NIX_TXSCH_LVL_CNT marks a leaf (SQ) rather than a real txsch level */
353 if (node->hw_lvl == NIX_TXSCH_LVL_CNT)
354 snprintf(buf, buflen, "SQ_%d", node->id);
356 snprintf(buf, buflen, "%s_%d", nix_tm_hwlvl2str(node->hw_lvl),
/* Thin public wrapper: delete a node from the user tree; 'free' selects
 * whether the node memory is released as well.
 */
362 roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
364 return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
/* Flush all SMQs of the active tree: XOFF+flush each SMQ that owns HW
 * resources, then XON them again.  Only valid while the hierarchy is
 * enabled.
 */
368 roc_nix_smq_flush(struct roc_nix *roc_nix)
370 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
371 struct nix_tm_node_list *list;
372 enum roc_nix_tm_tree tree;
373 struct nix_tm_node *node;
376 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
380 list = nix_tm_node_list(nix, tree);
382 /* XOFF & Flush all SMQ's. HRM mandates
383 * all SQ's empty before SMQ flush is issued.
385 TAILQ_FOREACH(node, list, node) {
386 if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
388 if (!(node->flags & NIX_TM_NODE_HWRES))
/* XOFF (true) pass */
391 rc = nix_tm_smq_xoff(nix, node, true);
/* NOTE(review): message says "enable" but this is the XOFF/disable
 * path — likely a copy-paste; confirm intended wording upstream.
 */
393 plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
/* XON (false) pass to resume traffic */
400 TAILQ_FOREACH(node, list, node) {
401 if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
403 if (!(node->flags & NIX_TM_NODE_HWRES))
406 rc = nix_tm_smq_xoff(nix, node, false);
408 plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
/* Tear down an enabled TM hierarchy: drain in-flight packets (optionally
 * re-enabling CGX RX/TX first), disable per-SQ backpressure and SQB-aura
 * flow control, flush every SQ, XOFF+flush all SMQs, sanity-check that
 * each SQ drained cleanly, then clear the HIERARCHY_ENA flag and restore
 * the CGX state.  NOTE(review): sampled view — several guards/braces and
 * the final return are not visible here.
 */
418 roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
420 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
421 uint16_t sqb_cnt, head_off, tail_off;
422 uint16_t sq_cnt = nix->nb_tx_queues;
423 struct mbox *mbox = (&nix->dev)->mbox;
424 struct nix_tm_node_list *list;
425 enum roc_nix_tm_tree tree;
426 struct nix_tm_node *node;
427 struct roc_nix_sq *sq;
432 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
435 plt_tm_dbg("Disabling hierarchy on %s", nix->pci_dev->name);
438 list = nix_tm_node_list(nix, tree);
440 /* Enable CGX RXTX to drain pkts */
441 if (!roc_nix->io_enabled) {
442 /* Though it enables both RX MCAM Entries and CGX Link
443 * we assume all the rx queues are stopped way back.
445 mbox_alloc_msg_nix_lf_start_rx(mbox);
446 rc = mbox_process(mbox);
448 plt_err("cgx start failed, rc=%d", rc);
/* XON all SMQs so queued packets can drain out */
454 TAILQ_FOREACH(node, list, node) {
455 if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
457 if (!(node->flags & NIX_TM_NODE_HWRES))
460 rc = nix_tm_smq_xoff(nix, node, false);
462 plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
468 /* Disable backpressure, it will be enabled back if needed on
471 for (i = 0; i < sq_cnt; i++) {
/* -ENOENT means no BP config existed for this SQ; not fatal */
476 rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
477 if (rc && rc != -ENOENT) {
478 plt_err("Failed to disable backpressure, rc=%d", rc);
483 /* Flush all tx queues */
484 for (i = 0; i < sq_cnt; i++) {
/* Turn off SQB-aura flow control so the SQ can fully drain */
489 rc = roc_nix_tm_sq_aura_fc(sq, false);
491 plt_err("Failed to disable sqb aura fc, rc=%d", rc);
495 /* Wait for sq entries to be flushed */
496 rc = roc_nix_tm_sq_flush_spin(sq);
498 plt_err("Failed to drain sq, rc=%d\n", rc);
503 /* XOFF & Flush all SMQ's. HRM mandates
504 * all SQ's empty before SMQ flush is issued.
506 TAILQ_FOREACH(node, list, node) {
507 if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
509 if (!(node->flags & NIX_TM_NODE_HWRES))
512 rc = nix_tm_smq_xoff(nix, node, true);
/* NOTE(review): "enable" wording on the XOFF path — confirm upstream */
514 plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
519 node->flags &= ~NIX_TM_NODE_ENABLED;
522 /* Verify sanity of all tx queues */
523 for (i = 0; i < sq_cnt; i++) {
/* NIX_LF_SQ_OP_STATUS is read via atomic add with qid in wdata[63:32] */
528 wdata = ((uint64_t)sq->qid << 32);
529 regaddr = nix->base + NIX_LF_SQ_OP_STATUS;
530 val = roc_atomic64_add_nosync(wdata, (int64_t *)regaddr);
/* Unpack status: sqb count and SQB head/tail offsets */
532 sqb_cnt = val & 0xFFFF;
533 head_off = (val >> 20) & 0x3F;
534 tail_off = (val >> 28) & 0x3F;
/* Drained SQ must hold at most one SQB, head==tail, and fc == nb_sqb_bufs */
536 if (sqb_cnt > 1 || head_off != tail_off ||
537 (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
538 plt_err("Failed to gracefully flush sq %u", sq->qid);
541 nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
543 /* Restore cgx state */
544 if (!roc_nix->io_enabled) {
545 mbox_alloc_msg_nix_lf_stop_rx(mbox);
546 rc |= mbox_process(mbox);
/* Program the given TM tree into hardware and enable it: free other
 * trees' HW resources, allocate and assign txsch queues, write scheduler
 * registers, mark non-leaf nodes enabled, update SQ scheduling while SQs
 * are idle, XON all SMQs, and (when xmit_enable) re-enable SQB-aura flow
 * control on every leaf SQ.  NOTE(review): sampled view — xmit_enable
 * guards and error branches are partly missing here.
 */
552 roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree tree,
555 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
556 struct nix_tm_node_list *list;
557 struct nix_tm_node *node;
558 struct roc_nix_sq *sq;
563 if (tree >= ROC_NIX_TM_TREE_MAX)
564 return NIX_ERR_PARAM;
/* Re-enabling is only an error if a different tree is active */
566 if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
567 if (nix->tm_tree != tree)
572 plt_tm_dbg("Enabling hierarchy on %s, xmit_ena %u, tree %u",
573 nix->pci_dev->name, xmit_enable, tree);
575 /* Free hw resources of other trees */
576 tree_mask = NIX_TM_TREE_MASK_ALL;
577 tree_mask &= ~BIT(tree);
579 rc = nix_tm_free_resources(roc_nix, tree_mask, true);
581 plt_err("failed to free resources of other trees, rc=%d", rc);
585 /* Update active tree before starting to do anything */
588 nix_tm_update_parent_info(nix, tree);
590 rc = nix_tm_alloc_txschq(nix, tree);
592 plt_err("TM failed to alloc tm resources=%d", rc);
596 rc = nix_tm_assign_resources(nix, tree);
598 plt_err("TM failed to assign tm resources=%d", rc);
602 rc = nix_tm_txsch_reg_config(nix, tree);
604 plt_err("TM failed to configure sched registers=%d", rc);
608 list = nix_tm_node_list(nix, tree);
609 /* Mark all non-leaf's as enabled */
610 TAILQ_FOREACH(node, list, node) {
611 if (!nix_tm_is_leaf(nix, node->lvl))
612 node->flags |= NIX_TM_NODE_ENABLED;
618 /* Update SQ Sched Data while SQ is idle */
619 TAILQ_FOREACH(node, list, node) {
620 if (!nix_tm_is_leaf(nix, node->lvl))
623 rc = nix_tm_sq_sched_conf(nix, node, false);
625 plt_err("SQ %u sched update failed, rc=%d", node->id,
631 /* Finally XON all SMQ's */
632 TAILQ_FOREACH(node, list, node) {
633 if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
636 rc = nix_tm_smq_xoff(nix, node, false);
638 plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
644 /* Enable xmit as all the topology is ready */
645 TAILQ_FOREACH(node, list, node) {
646 if (!nix_tm_is_leaf(nix, node->lvl))
650 sq = nix->sqs[sq_id];
/* Re-arm SQB aura flow control now that scheduling is live */
652 rc = roc_nix_tm_sq_aura_fc(sq, true);
654 plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
658 node->flags |= NIX_TM_NODE_ENABLED;
662 nix->tm_flags |= NIX_TM_HIERARCHY_ENA;
/* Suspend (SW XOFF) or resume (XON) a single user-tree node via a
 * txschq config mailbox.  A no-op when the ENABLED flag already matches
 * the requested state.
 */
667 roc_nix_tm_node_suspend_resume(struct roc_nix *roc_nix, uint32_t node_id,
670 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
671 struct mbox *mbox = (&nix->dev)->mbox;
672 struct nix_txschq_config *req;
673 struct nix_tm_node *node;
677 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
679 return NIX_ERR_TM_INVALID_NODE;
/* Compute what the flags would be after the state change */
682 flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
683 (flags | NIX_TM_NODE_ENABLED);
/* Already in the requested state */
685 if (node->flags == flags)
688 /* send mbox for state change */
689 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
691 req->lvl = node->hw_lvl;
693 nix_tm_sw_xoff_prep(node, suspend, req->reg, req->regval);
694 rc = mbox_process(mbox);
/* Pre-reserve 'contig' contiguous and 'discontig' discontiguous txsch
 * queues at the given level, topping up via the AF mailbox only for the
 * shortfall, then record the thresholds and release anything held above
 * them.
 */
701 roc_nix_tm_prealloc_res(struct roc_nix *roc_nix, uint8_t lvl,
702 uint16_t discontig, uint16_t contig)
704 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
705 struct mbox *mbox = (&nix->dev)->mbox;
706 struct nix_txsch_alloc_req *req;
707 struct nix_txsch_alloc_rsp *rsp;
/* Map user level to HW txsch level; CNT means invalid/leaf */
711 hw_lvl = nix_tm_lvl2nix(nix, lvl);
712 if (hw_lvl == NIX_TXSCH_LVL_CNT)
715 /* Preallocate contiguous */
716 if (nix->contig_rsvd[hw_lvl] < contig) {
717 req = mbox_alloc_msg_nix_txsch_alloc(mbox);
/* Request only the shortfall, not the full count */
720 req->schq_contig[hw_lvl] = contig - nix->contig_rsvd[hw_lvl];
722 rc = mbox_process_msg(mbox, (void *)&rsp);
726 nix_tm_copy_rsp_to_nix(nix, rsp);
729 /* Preallocate discontiguous */
730 if (nix->discontig_rsvd[hw_lvl] < discontig) {
731 req = mbox_alloc_msg_nix_txsch_alloc(mbox);
734 req->schq[hw_lvl] = discontig - nix->discontig_rsvd[hw_lvl];
736 rc = mbox_process_msg(mbox, (void *)&rsp);
740 nix_tm_copy_rsp_to_nix(nix, rsp);
743 /* Save thresholds */
744 nix->contig_rsvd[hw_lvl] = contig;
745 nix->discontig_rsvd[hw_lvl] = discontig;
746 /* Release anything present above thresholds */
747 nix_tm_release_resources(nix, hw_lvl, true, true);
748 nix_tm_release_resources(nix, hw_lvl, false, true);
/* Attach a (possibly different) shaper profile to a non-leaf user node.
 * Adjusts profile ref counts, and when the hierarchy is live: SW-XOFFs
 * the node, rewrites its PIR/CIR registers, then clears the XOFF in the
 * same mailbox message.  force_update re-applies even an unchanged id.
 */
753 roc_nix_tm_node_shaper_update(struct roc_nix *roc_nix, uint32_t node_id,
754 uint32_t profile_id, bool force_update)
756 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
757 struct nix_tm_shaper_profile *profile = NULL;
758 struct mbox *mbox = (&nix->dev)->mbox;
759 struct nix_txschq_config *req;
760 struct nix_tm_node *node;
764 /* Shaper updates valid only for user nodes */
765 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
766 if (!node || nix_tm_is_leaf(nix, node->lvl))
767 return NIX_ERR_TM_INVALID_NODE;
/* PROFILE_NONE detaches; any other id must resolve to a known profile */
769 if (profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE) {
770 profile = nix_tm_shaper_profile_search(nix, profile_id);
772 return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
775 /* Pkt mode should match existing node's pkt mode */
776 if (profile && profile->pkt_mode != node->pkt_mode)
777 return NIX_ERR_TM_PKT_MODE_MISMATCH;
/* Same profile without force_update: nothing to do */
779 if ((profile_id == node->shaper_profile_id) && !force_update) {
781 } else if (profile_id != node->shaper_profile_id) {
782 struct nix_tm_shaper_profile *old;
784 /* Find old shaper profile and reduce ref count */
785 old = nix_tm_shaper_profile_search(nix,
786 node->shaper_profile_id);
793 /* Reduce older shaper ref count and increase new one */
794 node->shaper_profile_id = profile_id;
797 /* Nothing to do if hierarchy not yet enabled */
798 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
801 node->flags &= ~NIX_TM_NODE_ENABLED;
803 /* Flush the specific node with SW_XOFF */
804 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
805 req->lvl = node->hw_lvl;
806 k = nix_tm_sw_xoff_prep(node, true, req->reg, req->regval);
809 rc = mbox_process(mbox);
813 /* Update the PIR/CIR and clear SW XOFF */
814 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
815 req->lvl = node->hw_lvl;
/* Shaper registers first, then XOFF-clear regs appended at offset k */
817 k = nix_tm_shaper_reg_prep(node, profile, req->reg, req->regval);
819 k += nix_tm_sw_xoff_prep(node, false, &req->reg[k], &req->regval[k]);
822 rc = mbox_process(mbox);
824 node->flags |= NIX_TM_NODE_ENABLED;
/* Update a user node's scheduling weight in place.  Despite the name,
 * only dynamic weight changes are supported: parent and priority must be
 * unchanged.  Leaves update their SQ context directly; non-leaves are
 * updated under SW-XOFF of the parent and all siblings, then XON'd.
 * NOTE(review): sampled view — several error checks/braces are missing.
 */
829 roc_nix_tm_node_parent_update(struct roc_nix *roc_nix, uint32_t node_id,
830 uint32_t new_parent_id, uint32_t priority,
833 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
834 struct mbox *mbox = (&nix->dev)->mbox;
835 struct nix_tm_node *node, *sibling;
836 struct nix_tm_node *new_parent;
837 struct nix_txschq_config *req;
838 struct nix_tm_node_list *list;
842 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
844 return NIX_ERR_TM_INVALID_NODE;
846 /* Parent id valid only for non root nodes */
847 if (node->hw_lvl != nix->tm_root_lvl) {
849 nix_tm_node_search(nix, new_parent_id, ROC_NIX_TM_USER);
851 return NIX_ERR_TM_INVALID_PARENT;
853 /* Current support is only for dynamic weight update */
854 if (node->parent != new_parent || node->priority != priority)
855 return NIX_ERR_TM_PARENT_PRIO_UPDATE;
858 list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
859 /* Skip if no change */
860 if (node->weight == weight)
863 node->weight = weight;
865 /* Nothing to do if hierarchy not yet enabled */
866 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
869 /* For leaf nodes, SQ CTX needs update */
870 if (nix_tm_is_leaf(nix, node->lvl)) {
871 /* Update SQ quantum data on the fly */
872 rc = nix_tm_sq_sched_conf(nix, node, true);
874 return NIX_ERR_TM_SQ_UPDATE_FAIL;
876 /* XOFF Parent node */
877 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
878 req->lvl = node->parent->hw_lvl;
879 req->num_regs = nix_tm_sw_xoff_prep(node->parent, true,
880 req->reg, req->regval);
881 rc = mbox_process(mbox);
885 /* XOFF this node and all other siblings */
886 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
887 req->lvl = node->hw_lvl;
/* Batch XOFF regs for every node sharing this parent */
890 TAILQ_FOREACH(sibling, list, node) {
891 if (sibling->parent != node->parent)
893 k += nix_tm_sw_xoff_prep(sibling, true, &req->reg[k],
897 rc = mbox_process(mbox);
901 /* Update new weight for current node */
902 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
903 req->lvl = node->hw_lvl;
905 nix_tm_sched_reg_prep(nix, node, req->reg, req->regval);
906 rc = mbox_process(mbox);
910 /* XON this node and all other siblings */
911 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
912 req->lvl = node->hw_lvl;
915 TAILQ_FOREACH(sibling, list, node) {
916 if (sibling->parent != node->parent)
918 k += nix_tm_sw_xoff_prep(sibling, false, &req->reg[k],
922 rc = mbox_process(mbox);
926 /* XON Parent node */
927 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
928 req->lvl = node->parent->hw_lvl;
929 req->num_regs = nix_tm_sw_xoff_prep(node->parent, false,
930 req->reg, req->regval);
931 rc = mbox_process(mbox);
/* (Re)initialize TM: refuse while a hierarchy is enabled, free all
 * resources across every tree, then build the default tree.
 */
939 roc_nix_tm_init(struct roc_nix *roc_nix)
941 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
945 if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
946 plt_err("Cannot init while existing hierarchy is enabled");
950 /* Free up all user resources already held */
951 tree_mask = NIX_TM_TREE_MASK_ALL;
952 rc = nix_tm_free_resources(roc_nix, tree_mask, false);
954 plt_err("Failed to freeup all nodes and resources, rc=%d", rc);
958 /* Prepare default tree */
959 rc = nix_tm_prepare_default_tree(roc_nix);
961 plt_err("failed to prepare default tm tree, rc=%d", rc);
/* Rate-limit a single SQ by programming a PIR-only shaper on its parent
 * MDQ node (valid only on the internal rate-limit tree, not the user
 * tree).  The parent is SW-XOFF'd while the shaper registers change and
 * XON'd in the same mailbox message.
 * NOTE(review): "®[k]" / "®val[k]" below appear to be an HTML-entity
 * mis-encoding of "&reg[k]" / "&regval[k]" (&reg; -> (R)); the file needs
 * re-encoding — compare against the upstream source before building.
 */
969 roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
971 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
972 struct nix_tm_shaper_profile profile;
973 struct mbox *mbox = (&nix->dev)->mbox;
974 struct nix_tm_node *node, *parent;
976 volatile uint64_t *reg, *regval;
977 struct nix_txschq_config *req;
/* Only valid on the internal (rate-limit) tree with hierarchy enabled */
982 if ((nix->tm_tree == ROC_NIX_TM_USER) ||
983 !(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
984 return NIX_ERR_TM_INVALID_TREE;
986 node = nix_tm_node_search(nix, qid, nix->tm_tree);
988 /* check if we found a valid leaf node */
989 if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
990 node->parent->hw_id == NIX_TM_HW_ID_INVALID)
991 return NIX_ERR_TM_INVALID_NODE;
993 parent = node->parent;
994 flags = parent->flags;
996 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
997 req->lvl = NIX_TXSCH_LVL_MDQ;
999 regval = req->regval;
/* XOFF the parent before touching its shaper registers */
1002 k += nix_tm_sw_xoff_prep(parent, true, ®[k], ®val[k]);
1003 flags &= ~NIX_TM_NODE_ENABLED;
/* Re-enable (XON) if the parent was disabled */
1007 if (!(flags & NIX_TM_NODE_ENABLED)) {
1008 k += nix_tm_sw_xoff_prep(parent, false, ®[k], ®val[k]);
1009 flags |= NIX_TM_NODE_ENABLED;
1012 /* Use only PIR for rate limit */
1013 memset(&profile, 0, sizeof(profile));
1014 profile.peak.rate = rate;
1015 /* Minimum burst of ~4us Bytes of Tx */
1016 profile.peak.size = PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix),
1017 (4ul * rate) / ((uint64_t)1E6 * 8));
/* Track the smallest rate programmed on this LF */
1018 if (!nix->tm_rate_min || nix->tm_rate_min > rate)
1019 nix->tm_rate_min = rate;
1021 k += nix_tm_shaper_reg_prep(parent, &profile, ®[k], ®val[k]);
1024 rc = mbox_process(mbox);
1028 parent->flags = flags;
/* Tear down TM completely (xmit assumed already disabled): free all
 * nodes/resources across every tree, ask AF to free all remaining txsch
 * queues, reset the local schq bitmaps and reservation thresholds, drop
 * all shaper profiles and clear the hierarchy-enabled flag.
 */
1033 roc_nix_tm_fini(struct roc_nix *roc_nix)
1035 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1036 struct mbox *mbox = (&nix->dev)->mbox;
1037 struct nix_txsch_free_req *req;
1042 /* Xmit is assumed to be disabled */
1043 /* Free up resources already held */
1044 tree_mask = NIX_TM_TREE_MASK_ALL;
1045 rc = nix_tm_free_resources(roc_nix, tree_mask, false);
1047 plt_err("Failed to freeup existing nodes or rsrcs, rc=%d", rc);
1049 /* Free all other hw resources */
1050 req = mbox_alloc_msg_nix_txsch_free(mbox);
1054 req->flags = TXSCHQ_FREE_ALL;
1055 rc = mbox_process(mbox);
1057 plt_err("Failed to freeup all res, rc=%d", rc);
/* Reset SW bookkeeping for every txsch level */
1059 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1060 plt_bitmap_reset(nix->schq_bmp[hw_lvl]);
1061 plt_bitmap_reset(nix->schq_contig_bmp[hw_lvl]);
1062 nix->contig_rsvd[hw_lvl] = 0;
1063 nix->discontig_rsvd[hw_lvl] = 0;
1066 /* Clear shaper profiles */
1067 nix_tm_clear_shaper_profiles(nix);
1069 nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
/* Query AF for currently free txsch queues and fill schq[] per user
 * level, picking the NIX0 or NIX1 counts as appropriate for this LF.
 */
1073 roc_nix_tm_rsrc_count(struct roc_nix *roc_nix, uint16_t schq[ROC_TM_LVL_MAX])
1075 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1076 struct mbox *mbox = (&nix->dev)->mbox;
1077 struct free_rsrcs_rsp *rsp;
1081 /* Get the current free resources */
1082 mbox_alloc_msg_free_rsrc_cnt(mbox);
1083 rc = mbox_process_msg(mbox, (void *)&rsp);
1087 for (i = 0; i < ROC_TM_LVL_MAX; i++) {
/* Skip user levels with no corresponding HW txsch level */
1088 hw_lvl = nix_tm_lvl2nix(nix, i);
1089 if (hw_lvl == NIX_TXSCH_LVL_CNT)
1092 schq[i] = (nix->is_nix1 ? rsp->schq_nix1[hw_lvl] :
/* Fill schq[] with the theoretical per-level maxima for a PF (TL1-rooted)
 * or VF (TL2-rooted) hierarchy.  NOTE(review): the switch is truncated in
 * this view (TL1 case body and default are not visible).
 */
1100 roc_nix_tm_rsrc_max(bool pf, uint16_t schq[ROC_TM_LVL_MAX])
1105 for (i = 0; i < ROC_TM_LVL_MAX; i++) {
/* Root level differs: PF hierarchies start at TL1, VF at TL2 */
1106 hw_lvl = pf ? nix_tm_lvl2nix_tl1_root(i) :
1107 nix_tm_lvl2nix_tl2_root(i);
1110 case NIX_TXSCH_LVL_SMQ:
/* CN9K has fewer SMQs than CN10K-class parts */
1111 max = (roc_model_is_cn9k() ?
1112 NIX_CN9K_TXSCH_LVL_SMQ_MAX :
1113 NIX_TXSCH_LVL_SMQ_MAX);
1115 case NIX_TXSCH_LVL_TL4:
1116 max = NIX_TXSCH_LVL_TL4_MAX;
1118 case NIX_TXSCH_LVL_TL3:
1119 max = NIX_TXSCH_LVL_TL3_MAX;
1121 case NIX_TXSCH_LVL_TL2:
/* VFs own a single TL2 */
1122 max = pf ? NIX_TXSCH_LVL_TL2_MAX : 1;
1124 case NIX_TXSCH_LVL_TL1:
1136 roc_nix_tm_root_has_sp(struct roc_nix *roc_nix)
1138 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1140 if (nix->tm_flags & NIX_TM_TL1_NO_SP)