/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

int
roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
{
        struct npa_aq_enq_req *req;
        struct npa_aq_enq_rsp *rsp;
        uint64_t aura_handle;
        struct npa_lf *lf;
        struct mbox *mbox;
        int rc = -ENOSPC;

        plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
                   enable ? "enable" : "disable");

        lf = idev_npa_obj_get();
        if (!lf)
                return NPA_ERR_DEVICE_NOT_BOUNDED;

        /* Set/clear sqb aura fc_ena */
        mbox = lf->mbox;
        aura_handle = sq->aura_handle;
        req = mbox_alloc_msg_npa_aq_enq(mbox);
        if (req == NULL)
                return rc;

        req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_WRITE;
        /* Not needed for aura writes, but the AF driver expects it */
        /* AF will translate to associated poolctx */
        req->aura.pool_addr = req->aura_id;

        req->aura.fc_ena = enable;
        req->aura_mask.fc_ena = 1;
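
        /* NPA refreshes the flow-control count at the address in sq->fc
         * using the store type chosen below; datapath code compares that
         * count against the SQ's SQB budget (see the drain check in
         * roc_nix_tm_hierarchy_disable()).
         */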
        if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0()) {
                req->aura.fc_stype = 0x0;      /* STF */
                req->aura_mask.fc_stype = 0x3; /* STF */
        } else {
                req->aura.fc_stype = 0x3;      /* STSTP */
                req->aura_mask.fc_stype = 0x3; /* STSTP */
        }

        rc = mbox_process(mbox);
        if (rc)
                return rc;

        /* Read back npa aura ctx */
        req = mbox_alloc_msg_npa_aq_enq(mbox);
        if (req == NULL)
                return -ENOSPC;

        req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_READ;

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        /* Init when enabled as there might be no triggers */
        if (enable)
                *(volatile uint64_t *)sq->fc = rsp->aura.count;
        else
                *(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
        /* Sync write barrier */
        plt_wmb();

        return 0;
}

int
roc_nix_tm_free_resources(struct roc_nix *roc_nix, bool hw_only)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);

        if (nix->tm_flags & NIX_TM_HIERARCHY_ENA)
                return -EBUSY;

        return nix_tm_free_resources(roc_nix, BIT(ROC_NIX_TM_USER), hw_only);
}

static int
nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
{
        uint64_t min_rate = profile->commit.rate;

        if (!profile->pkt_mode)
                return 0;

        profile->pkt_mode_adj = 1;

        if (profile->commit.rate &&
            (profile->commit.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
             profile->commit.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
                return NIX_ERR_TM_INVALID_COMMIT_RATE;

        if (profile->peak.rate &&
            (profile->peak.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
             profile->peak.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
                return NIX_ERR_TM_INVALID_PEAK_RATE;

        if (profile->peak.rate && min_rate > profile->peak.rate)
                min_rate = profile->peak.rate;

        /* Each packet accumulates a single count, whereas HW
         * counts in bytes, so convert the user's pps rates to
         * bps by multiplying by 8.
         */
        profile->commit.rate = profile->commit.rate * 8;
        profile->peak.rate = profile->peak.rate * 8;
        min_rate = min_rate * 8;

        if (min_rate && (min_rate < NIX_TM_MIN_SHAPER_RATE)) {
                int adjust = NIX_TM_MIN_SHAPER_RATE / min_rate;

                if (adjust > NIX_TM_LENGTH_ADJUST_MAX)
                        return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

                profile->pkt_mode_adj += adjust;
                profile->commit.rate += (adjust * profile->commit.rate);
                profile->peak.rate += (adjust * profile->peak.rate);
                /* The number of tokens freed after scheduling is
                 * proportional to the adjusted packet length, so
                 * scale the burst sizes accordingly.
                 */
                profile->commit.size *= adjust;
                profile->peak.size *= adjust;
        }

        return 0;
}
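
/* Illustrative walk-through of the adjustment above, with hypothetical
 * numbers: if the lower of the two rates is 100 pps, it becomes 800 after
 * the *8 conversion. If NIX_TM_MIN_SHAPER_RATE were 2400, then
 * adjust = 2400 / 800 = 3, so both rates are scaled by (1 + 3), the burst
 * sizes by 3, and pkt_mode_adj grows by 3, keeping the HW token accounting
 * at one token per packet.
 */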

static int
nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
                          struct nix_tm_shaper_profile *profile, int skip_ins)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint64_t commit_rate, commit_sz;
        uint64_t min_burst, max_burst;
        uint64_t peak_rate, peak_sz;
        uint32_t id = profile->id;
        int rc;

        rc = nix_tm_adjust_shaper_pps_rate(profile);
        if (rc)
                return rc;

        commit_rate = profile->commit.rate;
        commit_sz = profile->commit.size;
        peak_rate = profile->peak.rate;
        peak_sz = profile->peak.size;

        min_burst = NIX_TM_MIN_SHAPER_BURST;
        max_burst = roc_nix_tm_max_shaper_burst_get();

        if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
                return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;

        if (profile->pkt_len_adj < NIX_TM_LENGTH_ADJUST_MIN ||
            profile->pkt_len_adj > NIX_TM_LENGTH_ADJUST_MAX)
                return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

        /* We cannot support both pkt length adjust and pkt mode */
        if (profile->pkt_mode && profile->pkt_len_adj)
                return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

        /* Commit rate and burst size can be enabled/disabled */
        if (commit_rate || commit_sz) {
                if (commit_sz < min_burst || commit_sz > max_burst)
                        return NIX_ERR_TM_INVALID_COMMIT_SZ;
                else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL,
                                                  NULL))
                        return NIX_ERR_TM_INVALID_COMMIT_RATE;
        }

        /* Peak rate and burst size can be enabled/disabled */
        if (peak_sz || peak_rate) {
                if (peak_sz < min_burst || peak_sz > max_burst)
                        return NIX_ERR_TM_INVALID_PEAK_SZ;
                else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL))
                        return NIX_ERR_TM_INVALID_PEAK_RATE;
        }

        /* If PIR and CIR are requested, PIR should always be larger than CIR */
        if (peak_rate && commit_rate && (commit_rate > peak_rate))
                return NIX_ERR_TM_INVALID_PEAK_RATE;

        if (!skip_ins)
                TAILQ_INSERT_TAIL(&nix->shaper_profile_list, profile, shaper);

        plt_tm_dbg("Added TM shaper profile %u, pir %" PRIu64
                   ", pbs %" PRIu64 ", cir %" PRIu64 ", cbs %" PRIu64
                   ", adj %u, pkt_mode %u",
                   id, profile->peak.rate, profile->peak.size,
                   profile->commit.rate, profile->commit.size,
                   profile->pkt_len_adj, profile->pkt_mode);

        /* Always use PIR for single rate shaping */
        if (!peak_rate && commit_rate) {
                profile->peak.rate = profile->commit.rate;
                profile->peak.size = profile->commit.size;
                profile->commit.rate = 0;
                profile->commit.size = 0;
        }

        /* Update min rate */
        nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);

        return 0;
}

int
roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
                              struct roc_nix_tm_shaper_profile *roc_profile)
{
        struct nix_tm_shaper_profile *profile;

        profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

        profile->ref_cnt = 0;
        profile->id = roc_profile->id;
        profile->commit.rate = roc_profile->commit_rate;
        profile->peak.rate = roc_profile->peak_rate;
        profile->commit.size = roc_profile->commit_sz;
        profile->peak.size = roc_profile->peak_sz;
        profile->pkt_len_adj = roc_profile->pkt_len_adj;
        profile->pkt_mode = roc_profile->pkt_mode;
        profile->free_fn = roc_profile->free_fn;

        return nix_tm_shaper_profile_add(roc_nix, profile, 0);
}
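
/* Example usage (illustrative only; values are hypothetical, rates are
 * assumed to be in bits/sec and burst sizes in bytes for byte mode):
 *
 *      struct roc_nix_tm_shaper_profile p;
 *
 *      memset(&p, 0, sizeof(p));
 *      p.id = 1;
 *      p.commit_rate = 100000000;    (CIR, 100 Mbps)
 *      p.commit_sz = 4096;           (CBS)
 *      p.peak_rate = 1000000000;     (PIR, 1 Gbps)
 *      p.peak_sz = 8192;             (PBS)
 *      rc = roc_nix_tm_shaper_profile_add(roc_nix, &p);
 */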

int
roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
                                 struct roc_nix_tm_shaper_profile *roc_profile)
{
        struct nix_tm_shaper_profile *profile;

        profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

        profile->commit.rate = roc_profile->commit_rate;
        profile->peak.rate = roc_profile->peak_rate;
        profile->commit.size = roc_profile->commit_sz;
        profile->peak.size = roc_profile->peak_sz;

        return nix_tm_shaper_profile_add(roc_nix, profile, 1);
}

int
roc_nix_tm_shaper_profile_delete(struct roc_nix *roc_nix, uint32_t id)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile *profile;

        profile = nix_tm_shaper_profile_search(nix, id);
        if (!profile)
                return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

        if (profile->ref_cnt)
                return NIX_ERR_TM_SHAPER_PROFILE_IN_USE;

        plt_tm_dbg("Removing TM shaper profile %u", id);
        TAILQ_REMOVE(&nix->shaper_profile_list, profile, shaper);
        nix_tm_shaper_profile_free(profile);

        /* Update min rate */
        nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);

        return 0;
}

int
roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
{
        struct nix_tm_node *node;

        node = (struct nix_tm_node *)&roc_node->reserved;
        node->id = roc_node->id;
        node->priority = roc_node->priority;
        node->weight = roc_node->weight;
        node->lvl = roc_node->lvl;
        node->parent_id = roc_node->parent_id;
        node->shaper_profile_id = roc_node->shaper_profile_id;
        node->pkt_mode = roc_node->pkt_mode;
        node->pkt_mode_set = roc_node->pkt_mode_set;
        node->free_fn = roc_node->free_fn;
        node->tree = ROC_NIX_TM_USER;

        return nix_tm_node_add(roc_nix, node);
}
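
/* Example usage (illustrative; ids and levels are hypothetical): attach
 * leaf (SQ) node 0 under parent node 10 with DWRR weight 1 and no shaper:
 *
 *      struct roc_nix_tm_node n;
 *
 *      memset(&n, 0, sizeof(n));
 *      n.id = 0;                     (equals the SQ id for leaf nodes)
 *      n.parent_id = 10;
 *      n.priority = 0;
 *      n.weight = 1;
 *      n.lvl = ROC_TM_LVL_QUEUE;
 *      n.shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
 *      rc = roc_nix_tm_node_add(roc_nix, &n);
 */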

int
roc_nix_tm_node_pkt_mode_update(struct roc_nix *roc_nix, uint32_t node_id,
                                bool pkt_mode)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_node *node, *child;
        struct nix_tm_node_list *list;
        int num_children = 0;

        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
        if (!node)
                return NIX_ERR_TM_INVALID_NODE;

        if (node->pkt_mode == pkt_mode) {
                node->pkt_mode_set = true;
                return 0;
        }

        /* Check for any existing children; if there are any, the pkt
         * mode cannot be updated, as the children's quantum values are
         * already accounted for.
         */
        list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
        TAILQ_FOREACH(child, list, node) {
                if (child->parent == node)
                        num_children++;
        }

        /* Cannot update mode if it has children or tree is enabled */
        if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) && num_children)
                return NIX_ERR_TM_CHILD_EXISTS;

        if (node->pkt_mode_set && num_children)
                return NIX_ERR_TM_PKT_MODE_MISMATCH;

        node->pkt_mode = pkt_mode;
        node->pkt_mode_set = true;

        return 0;
}

int
roc_nix_tm_node_name_get(struct roc_nix *roc_nix, uint32_t node_id, char *buf,
                         size_t buflen)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_node *node;

        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
        if (!node) {
                plt_strlcpy(buf, "???", buflen);
                return NIX_ERR_TM_INVALID_NODE;
        }

        if (node->hw_lvl == NIX_TXSCH_LVL_CNT)
                snprintf(buf, buflen, "SQ_%d", node->id);
        else
                snprintf(buf, buflen, "%s_%d", nix_tm_hwlvl2str(node->hw_lvl),
                         node->hw_id);

        return 0;
}

int
roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
{
        return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
}

int
roc_nix_smq_flush(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_node_list *list;
        enum roc_nix_tm_tree tree;
        struct nix_tm_node *node;
        int rc = 0;

        if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return 0;

        tree = nix->tm_tree;
        list = nix_tm_node_list(nix, tree);

        /* XOFF & Flush all SMQ's. HRM mandates
         * all SQ's empty before SMQ flush is issued.
         */
        TAILQ_FOREACH(node, list, node) {
                if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
                        continue;
                if (!(node->flags & NIX_TM_NODE_HWRES))
                        continue;

                rc = nix_tm_smq_xoff(nix, node, true);
                if (rc)
                        plt_err("Failed to disable smq %u, rc=%d",
                                node->hw_id, rc);
        }

        /* XON all SMQ's back */
        TAILQ_FOREACH(node, list, node) {
                if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
                        continue;
                if (!(node->flags & NIX_TM_NODE_HWRES))
                        continue;

                rc = nix_tm_smq_xoff(nix, node, false);
                if (rc)
                        plt_err("Failed to enable smq %u, rc=%d",
                                node->hw_id, rc);
        }

        return rc;
}

int
roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint16_t sqb_cnt, head_off, tail_off;
        uint16_t sq_cnt = nix->nb_tx_queues;
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_tm_node_list *list;
        enum roc_nix_tm_tree tree;
        struct nix_tm_node *node;
        struct roc_nix_sq *sq;
        uint64_t wdata, val;
        uintptr_t regaddr;
        int rc = 0, i;

        if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return 0;

        plt_tm_dbg("Disabling hierarchy on %s", nix->pci_dev->name);

        tree = nix->tm_tree;
        list = nix_tm_node_list(nix, tree);

        /* Enable CGX RXTX to drain pkts */
        if (!roc_nix->io_enabled) {
                /* Though this enables both RX MCAM entries and the CGX
                 * link, we assume all the rx queues were stopped well
                 * before this point.
                 */
                mbox_alloc_msg_nix_lf_start_rx(mbox);
                rc = mbox_process(mbox);
                if (rc) {
                        plt_err("cgx start failed, rc=%d", rc);
                        return rc;
                }
        }

        /* XON all SMQ's so pending traffic can drain */
        TAILQ_FOREACH(node, list, node) {
                if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
                        continue;
                if (!(node->flags & NIX_TM_NODE_HWRES))
                        continue;

                rc = nix_tm_smq_xoff(nix, node, false);
                if (rc)
                        plt_err("Failed to enable smq %u, rc=%d",
                                node->hw_id, rc);
        }

        /* Disable backpressure; it will be enabled back if needed on
         * the next hierarchy enable.
         */
        for (i = 0; i < sq_cnt; i++) {
                sq = nix->sqs[i];
                if (!sq)
                        continue;

                rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
                if (rc)
                        plt_err("Failed to disable backpressure, rc=%d", rc);
        }

        /* Flush all tx queues */
        for (i = 0; i < sq_cnt; i++) {
                sq = nix->sqs[i];
                if (!sq)
                        continue;

                rc = roc_nix_tm_sq_aura_fc(sq, false);
                if (rc)
                        plt_err("Failed to disable sqb aura fc, rc=%d", rc);

                /* Wait for sq entries to be flushed */
                rc = roc_nix_tm_sq_flush_spin(sq);
                if (rc)
                        plt_err("Failed to drain sq, rc=%d", rc);
        }

        /* XOFF & Flush all SMQ's. HRM mandates
         * all SQ's empty before SMQ flush is issued.
         */
        TAILQ_FOREACH(node, list, node) {
                if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
                        continue;
                if (!(node->flags & NIX_TM_NODE_HWRES))
                        continue;

                rc = nix_tm_smq_xoff(nix, node, true);
                if (rc)
                        plt_err("Failed to disable smq %u, rc=%d",
                                node->hw_id, rc);

                node->flags &= ~NIX_TM_NODE_ENABLED;
        }

        /* Verify sanity of all tx queues */
        for (i = 0; i < sq_cnt; i++) {
                sq = nix->sqs[i];
                if (!sq)
                        continue;

                wdata = ((uint64_t)sq->qid << 32);
                regaddr = nix->base + NIX_LF_SQ_OP_STATUS;
                val = roc_atomic64_add_nosync(wdata, (int64_t *)regaddr);
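
                /* NIX_LF_SQ_OP_STATUS layout, as decoded below:
                 * sqb_cnt lives in bits [15:0], head_off in bits
                 * [25:20] and tail_off in bits [33:28].
                 */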
                sqb_cnt = val & 0xFFFF;
                head_off = (val >> 20) & 0x3F;
                tail_off = (val >> 28) & 0x3F;

                if (sqb_cnt > 1 || head_off != tail_off ||
                    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
                        plt_err("Failed to gracefully flush sq %u", sq->qid);
        }

        nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;

        /* Restore cgx state */
        if (!roc_nix->io_enabled) {
                mbox_alloc_msg_nix_lf_stop_rx(mbox);
                rc |= mbox_process(mbox);
        }

        return rc;
}

int
roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree tree,
                            bool xmit_enable)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_node_list *list;
        struct nix_tm_node *node;
        struct roc_nix_sq *sq;
        uint32_t tree_mask;
        uint16_t sq_id;
        int rc;

        if (tree >= ROC_NIX_TM_TREE_MAX)
                return NIX_ERR_PARAM;

        if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
                if (nix->tm_tree != tree)
                        return -EBUSY;
                return 0;
        }

        plt_tm_dbg("Enabling hierarchy on %s, xmit_ena %u, tree %u",
                   nix->pci_dev->name, xmit_enable, tree);

        /* Free hw resources of other trees */
        tree_mask = NIX_TM_TREE_MASK_ALL;
        tree_mask &= ~BIT(tree);

        rc = nix_tm_free_resources(roc_nix, tree_mask, true);
        if (rc) {
                plt_err("Failed to free resources of other trees, rc=%d", rc);
                return rc;
        }

        /* Update active tree before starting to do anything */
        nix->tm_tree = tree;

        nix_tm_update_parent_info(nix, tree);

        rc = nix_tm_alloc_txschq(nix, tree);
        if (rc) {
                plt_err("TM failed to alloc tm resources=%d", rc);
                return rc;
        }

        rc = nix_tm_assign_resources(nix, tree);
        if (rc) {
                plt_err("TM failed to assign tm resources=%d", rc);
                return rc;
        }

        rc = nix_tm_txsch_reg_config(nix, tree);
        if (rc) {
                plt_err("TM failed to configure sched registers=%d", rc);
                return rc;
        }

        list = nix_tm_node_list(nix, tree);
        /* Mark all non-leaf's as enabled */
        TAILQ_FOREACH(node, list, node) {
                if (!nix_tm_is_leaf(nix, node->lvl))
                        node->flags |= NIX_TM_NODE_ENABLED;
        }

        if (!xmit_enable)
                goto skip_sq_update;

        /* Update SQ Sched Data while SQ is idle */
        TAILQ_FOREACH(node, list, node) {
                if (!nix_tm_is_leaf(nix, node->lvl))
                        continue;

                rc = nix_tm_sq_sched_conf(nix, node, false);
                if (rc)
                        plt_err("SQ %u sched update failed, rc=%d",
                                node->id, rc);
        }

        /* Finally XON all SMQ's */
        TAILQ_FOREACH(node, list, node) {
                if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
                        continue;

                rc = nix_tm_smq_xoff(nix, node, false);
                if (rc)
                        plt_err("Failed to enable smq %u, rc=%d",
                                node->hw_id, rc);
        }

        /* Enable xmit as all the topology is ready */
        TAILQ_FOREACH(node, list, node) {
                if (!nix_tm_is_leaf(nix, node->lvl))
                        continue;

                sq_id = node->id;
                sq = nix->sqs[sq_id];

                rc = roc_nix_tm_sq_aura_fc(sq, true);
                if (rc) {
                        plt_err("TM sw xon failed on SQ %u, rc=%d",
                                node->id, rc);
                        return rc;
                }
                node->flags |= NIX_TM_NODE_ENABLED;
        }

skip_sq_update:
        nix->tm_flags |= NIX_TM_HIERARCHY_ENA;

        return 0;
}

int
roc_nix_tm_node_suspend_resume(struct roc_nix *roc_nix, uint32_t node_id,
                               bool suspend)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req;
        struct nix_tm_node *node;
        uint16_t flags;
        int rc;

        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
        if (!node)
                return NIX_ERR_TM_INVALID_NODE;

        flags = node->flags;
        flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
                          (flags | NIX_TM_NODE_ENABLED);

        if (node->flags == flags)
                return 0;

        /* send mbox for state change */
        req = mbox_alloc_msg_nix_txschq_cfg(mbox);

        req->lvl = node->hw_lvl;
        req->num_regs =
                nix_tm_sw_xoff_prep(node, suspend, req->reg, req->regval);
        rc = mbox_process(mbox);
        if (!rc)
                node->flags = flags;

        return rc;
}

int
roc_nix_tm_prealloc_res(struct roc_nix *roc_nix, uint8_t lvl,
                        uint16_t discontig, uint16_t contig)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txsch_alloc_req *req;
        struct nix_txsch_alloc_rsp *rsp;
        uint8_t hw_lvl;
        int rc = -ENOSPC;

        hw_lvl = nix_tm_lvl2nix(nix, lvl);
        if (hw_lvl == NIX_TXSCH_LVL_CNT)
                return NIX_ERR_PARAM;

        /* Preallocate contiguous */
        if (nix->contig_rsvd[hw_lvl] < contig) {
                req = mbox_alloc_msg_nix_txsch_alloc(mbox);
                if (req == NULL)
                        return rc;
                req->schq_contig[hw_lvl] = contig - nix->contig_rsvd[hw_lvl];

                rc = mbox_process_msg(mbox, (void *)&rsp);
                if (rc)
                        return rc;

                nix_tm_copy_rsp_to_nix(nix, rsp);
        }

        /* Preallocate discontiguous */
        if (nix->discontig_rsvd[hw_lvl] < discontig) {
                req = mbox_alloc_msg_nix_txsch_alloc(mbox);
                if (req == NULL)
                        return rc;
                req->schq[hw_lvl] = discontig - nix->discontig_rsvd[hw_lvl];

                rc = mbox_process_msg(mbox, (void *)&rsp);
                if (rc)
                        return rc;

                nix_tm_copy_rsp_to_nix(nix, rsp);
        }

        /* Save thresholds */
        nix->contig_rsvd[hw_lvl] = contig;
        nix->discontig_rsvd[hw_lvl] = discontig;
        /* Release anything present above thresholds */
        nix_tm_release_resources(nix, hw_lvl, true, true);
        nix_tm_release_resources(nix, hw_lvl, false, true);

        return 0;
}

int
roc_nix_tm_node_shaper_update(struct roc_nix *roc_nix, uint32_t node_id,
                              uint32_t profile_id, bool force_update)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile *profile = NULL;
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req;
        struct nix_tm_node *node;
        uint8_t k;
        int rc;

        /* Shaper updates valid only for user nodes */
        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
        if (!node || nix_tm_is_leaf(nix, node->lvl))
                return NIX_ERR_TM_INVALID_NODE;

        if (profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE) {
                profile = nix_tm_shaper_profile_search(nix, profile_id);
                if (!profile)
                        return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
        }

        /* Pkt mode should match existing node's pkt mode */
        if (profile && profile->pkt_mode != node->pkt_mode)
                return NIX_ERR_TM_PKT_MODE_MISMATCH;

        if ((profile_id == node->shaper_profile_id) && !force_update) {
                return 0;
        } else if (profile_id != node->shaper_profile_id) {
                struct nix_tm_shaper_profile *old;

                /* Find the old shaper profile and reduce its ref count,
                 * then increase the ref count of the new one.
                 */
                old = nix_tm_shaper_profile_search(nix,
                                                   node->shaper_profile_id);
                if (old)
                        old->ref_cnt--;
                if (profile)
                        profile->ref_cnt++;

                node->shaper_profile_id = profile_id;
        }

        /* Nothing to do if hierarchy not yet enabled */
        if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return 0;

        node->flags &= ~NIX_TM_NODE_ENABLED;

        /* Flush the specific node with SW_XOFF */
        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = node->hw_lvl;
        k = nix_tm_sw_xoff_prep(node, true, req->reg, req->regval);
        req->num_regs = k;

        rc = mbox_process(mbox);
        if (rc)
                return rc;

        /* Update the PIR/CIR and clear SW XOFF */
        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = node->hw_lvl;

        k = nix_tm_shaper_reg_prep(node, profile, req->reg, req->regval);

        k += nix_tm_sw_xoff_prep(node, false, &req->reg[k], &req->regval[k]);
        req->num_regs = k;

        rc = mbox_process(mbox);
        if (!rc)
                node->flags |= NIX_TM_NODE_ENABLED;

        return rc;
}

int
roc_nix_tm_node_parent_update(struct roc_nix *roc_nix, uint32_t node_id,
                              uint32_t new_parent_id, uint32_t priority,
                              uint32_t weight)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_tm_node *node, *sibling;
        struct nix_tm_node *new_parent;
        struct nix_txschq_config *req;
        struct nix_tm_node_list *list;
        uint8_t k;
        int rc;

        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
        if (!node)
                return NIX_ERR_TM_INVALID_NODE;

        /* Parent id valid only for non root nodes */
        if (node->hw_lvl != nix->tm_root_lvl) {
                new_parent =
                        nix_tm_node_search(nix, new_parent_id, ROC_NIX_TM_USER);
                if (!new_parent)
                        return NIX_ERR_TM_INVALID_PARENT;

                /* Current support is only for dynamic weight update */
                if (node->parent != new_parent || node->priority != priority)
                        return NIX_ERR_TM_PARENT_PRIO_UPDATE;
        }

        list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
        /* Skip if no change */
        if (node->weight == weight)
                return 0;

        node->weight = weight;

        /* Nothing to do if hierarchy not yet enabled */
        if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return 0;

        /* For leaf nodes, SQ CTX needs update */
        if (nix_tm_is_leaf(nix, node->lvl)) {
                /* Update SQ quantum data on the fly */
                rc = nix_tm_sq_sched_conf(nix, node, true);
                if (rc)
                        return NIX_ERR_TM_SQ_UPDATE_FAIL;
        } else {
                /* XOFF Parent node */
                req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                req->lvl = node->parent->hw_lvl;
                req->num_regs = nix_tm_sw_xoff_prep(node->parent, true,
                                                    req->reg, req->regval);
                rc = mbox_process(mbox);
                if (rc)
                        return rc;

                /* XOFF this node and all other siblings */
                req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                req->lvl = node->hw_lvl;

                k = 0;
                TAILQ_FOREACH(sibling, list, node) {
                        if (sibling->parent != node->parent)
                                continue;
                        k += nix_tm_sw_xoff_prep(sibling, true, &req->reg[k],
                                                 &req->regval[k]);
                }
                req->num_regs = k;
                rc = mbox_process(mbox);
                if (rc)
                        return rc;

                /* Update new weight for current node */
                req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                req->lvl = node->hw_lvl;
                req->num_regs =
                        nix_tm_sched_reg_prep(nix, node, req->reg, req->regval);
                rc = mbox_process(mbox);
                if (rc)
                        return rc;

                /* XON this node and all other siblings */
                req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                req->lvl = node->hw_lvl;

                k = 0;
                TAILQ_FOREACH(sibling, list, node) {
                        if (sibling->parent != node->parent)
                                continue;
                        k += nix_tm_sw_xoff_prep(sibling, false, &req->reg[k],
                                                 &req->regval[k]);
                }
                req->num_regs = k;
                rc = mbox_process(mbox);
                if (rc)
                        return rc;

                /* XON Parent node */
                req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                req->lvl = node->parent->hw_lvl;
                req->num_regs = nix_tm_sw_xoff_prep(node->parent, false,
                                                    req->reg, req->regval);
                rc = mbox_process(mbox);
                if (rc)
                        return rc;
        }

        return 0;
}

int
roc_nix_tm_init(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint32_t tree_mask;
        int rc;

        if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
                plt_err("Cannot init while existing hierarchy is enabled");
                return -EBUSY;
        }

        /* Free up all user resources already held */
        tree_mask = NIX_TM_TREE_MASK_ALL;
        rc = nix_tm_free_resources(roc_nix, tree_mask, false);
        if (rc) {
                plt_err("Failed to free up all nodes and resources, rc=%d",
                        rc);
                return rc;
        }

        /* Prepare default tree */
        rc = nix_tm_prepare_default_tree(roc_nix);
        if (rc) {
                plt_err("Failed to prepare default tm tree, rc=%d", rc);
                return rc;
        }

        return rc;
}

int
roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile profile;
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_tm_node *node, *parent;
        volatile uint64_t *reg, *regval;
        struct nix_txschq_config *req;
        uint16_t flags;
        uint8_t k = 0;
        int rc;

        if ((nix->tm_tree == ROC_NIX_TM_USER) ||
            !(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return NIX_ERR_TM_INVALID_TREE;

        node = nix_tm_node_search(nix, qid, nix->tm_tree);

        /* Check if we found a valid leaf node */
        if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
            node->parent->hw_id == NIX_TM_HW_ID_INVALID)
                return NIX_ERR_TM_INVALID_NODE;

        parent = node->parent;
        flags = parent->flags;

        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = NIX_TXSCH_LVL_MDQ;
        reg = req->reg;
        regval = req->regval;

        if (rate == 0) {
                k += nix_tm_sw_xoff_prep(parent, true, &reg[k], &regval[k]);
                flags &= ~NIX_TM_NODE_ENABLED;
                goto exit;
        }

        if (!(flags & NIX_TM_NODE_ENABLED)) {
                k += nix_tm_sw_xoff_prep(parent, false, &reg[k], &regval[k]);
                flags |= NIX_TM_NODE_ENABLED;
        }

        /* Use only PIR for rate limit */
        memset(&profile, 0, sizeof(profile));
        profile.peak.rate = rate;
        /* Minimum burst of ~4us worth of Tx bytes */
        profile.peak.size = PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix),
                                    (4ul * rate) / ((uint64_t)1E6 * 8));
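        /* For example, at rate = 1 Gbps the second operand is
         * (4 * 1e9) / (1e6 * 8) = 500 bytes, which PLT_MAX() then
         * raises to at least the maximum packet length.
         */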

        if (!nix->tm_rate_min || nix->tm_rate_min > rate)
                nix->tm_rate_min = rate;

        k += nix_tm_shaper_reg_prep(parent, &profile, &reg[k], &regval[k]);

exit:
        req->num_regs = k;
        rc = mbox_process(mbox);
        if (rc)
                return rc;

        parent->flags = flags;

        return 0;
}
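
/* Example usage (illustrative): cap SQ 0 to 500 Mbps on the default tree,
 * assuming the rate argument is in bits/sec as used by the math above:
 *
 *      rc = roc_nix_tm_rlimit_sq(roc_nix, 0, 500000000);
 *      if (rc)
 *              plt_err("Rate limit failed, rc=%d", rc);
 */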

int
roc_nix_tm_fini(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txsch_free_req *req;
        uint32_t tree_mask;
        uint8_t hw_lvl;
        int rc = 0;

        /* Xmit is assumed to be disabled */
        /* Free up resources already held */
        tree_mask = NIX_TM_TREE_MASK_ALL;
        rc = nix_tm_free_resources(roc_nix, tree_mask, false);
        if (rc)
                plt_err("Failed to free up existing nodes or resources, rc=%d",
                        rc);

        /* Free all other hw resources */
        req = mbox_alloc_msg_nix_txsch_free(mbox);
        if (req == NULL)
                return -ENOSPC;

        req->flags = TXSCHQ_FREE_ALL;
        rc = mbox_process(mbox);
        if (rc)
                plt_err("Failed to free up all resources, rc=%d", rc);

        for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
                plt_bitmap_reset(nix->schq_bmp[hw_lvl]);
                plt_bitmap_reset(nix->schq_contig_bmp[hw_lvl]);
                nix->contig_rsvd[hw_lvl] = 0;
                nix->discontig_rsvd[hw_lvl] = 0;
        }

        /* Clear shaper profiles */
        nix_tm_clear_shaper_profiles(nix);

        nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;

        return rc;
}

int
roc_nix_tm_rsrc_count(struct roc_nix *roc_nix, uint16_t schq[ROC_TM_LVL_MAX])
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        struct free_rsrcs_rsp *rsp;
        uint8_t hw_lvl;
        int i, rc;

        /* Get the current free resources */
        mbox_alloc_msg_free_rsrc_cnt(mbox);
        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        for (i = 0; i < ROC_TM_LVL_MAX; i++) {
                hw_lvl = nix_tm_lvl2nix(nix, i);
                if (hw_lvl == NIX_TXSCH_LVL_CNT)
                        continue;

                schq[i] = (nix->is_nix1 ? rsp->schq_nix1[hw_lvl] :
                                          rsp->schq[hw_lvl]);
        }

        return 0;
}

void
roc_nix_tm_rsrc_max(bool pf, uint16_t schq[ROC_TM_LVL_MAX])
{
        uint8_t hw_lvl, i;
        uint16_t max;

        for (i = 0; i < ROC_TM_LVL_MAX; i++) {
                hw_lvl = pf ? nix_tm_lvl2nix_tl1_root(i) :
                              nix_tm_lvl2nix_tl2_root(i);

                switch (hw_lvl) {
                case NIX_TXSCH_LVL_SMQ:
                        max = (roc_model_is_cn9k() ?
                               NIX_CN9K_TXSCH_LVL_SMQ_MAX :
                               NIX_TXSCH_LVL_SMQ_MAX);
                        break;
                case NIX_TXSCH_LVL_TL4:
                        max = NIX_TXSCH_LVL_TL4_MAX;
                        break;
                case NIX_TXSCH_LVL_TL3:
                        max = NIX_TXSCH_LVL_TL3_MAX;
                        break;
                case NIX_TXSCH_LVL_TL2:
                        max = pf ? NIX_TXSCH_LVL_TL2_MAX : 1;
                        break;
                case NIX_TXSCH_LVL_TL1:
                        max = pf ? 1 : 0;
                        break;
                default:
                        max = 0;
                        break;
                }

                schq[i] = max;
        }
}

bool
roc_nix_tm_root_has_sp(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);

        if (nix->tm_flags & NIX_TM_TL1_NO_SP)
                return false;

        return true;
}