1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Index of the lowest set bit in @slab.
 * NOTE: __builtin_ctzll() is undefined for a zero argument, so callers
 * must only pass a non-zero slab.
 */
9 bitmap_ctzll(uint64_t slab)
14 return __builtin_ctzll(slab);
/* Drain and free every entry on nix->shaper_profile_list.
 * A profile with a non-zero reference count only triggers a warning
 * before removal — callers are expected to tear down nodes first.
 */
18 nix_tm_clear_shaper_profiles(struct nix *nix)
20 struct nix_tm_shaper_profile *shaper_profile;
22 shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
23 while (shaper_profile != NULL) {
24 if (shaper_profile->ref_cnt)
25 plt_warn("Shaper profile %u has non zero references",
27 TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
28 nix_tm_shaper_profile_free(shaper_profile);
/* Re-read the head; the list shrinks by one each iteration */
29 shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
/* Program topology, scheduling and shaping registers for one TM node.
 * Register triples (reg / regval / regval_mask) are accumulated into
 * local arrays (k entries) and sent to the AF in a single txschq config
 * mailbox request.  A default TL1 config is sent first when this LF has
 * no TL1 access and the node sits at TL2.
 *
 * Fix: "&reg[k]" / "&regval[k]" had been corrupted to "®[k]" /
 * "®val[k]" (the HTML entity "&reg;" rendered as the (R) sign), which
 * is not valid C; the address-of expressions are restored below.
 */
34 nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
36 uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
37 uint64_t regval[MAX_REGS_PER_MBOX_MSG];
38 struct nix_tm_shaper_profile *profile;
39 uint64_t reg[MAX_REGS_PER_MBOX_MSG];
40 struct mbox *mbox = (&nix->dev)->mbox;
41 struct nix_txschq_config *req;
46 memset(regval, 0, sizeof(regval));
47 memset(regval_mask, 0, sizeof(regval_mask));
49 profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
50 hw_lvl = node->hw_lvl;
52 /* Need this trigger to configure TL1 */
53 if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
54 /* Prepare default conf for TL1 */
55 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
56 req->lvl = NIX_TXSCH_LVL_TL1;
58 k = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,
61 rc = mbox_process(mbox);
66 /* Prepare topology config */
67 k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
69 /* Prepare schedule config */
70 k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);
72 /* Prepare shaping config */
73 k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);
78 /* Copy and send config mbox */
79 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
83 mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
84 mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
85 mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
87 rc = mbox_process(mbox);
93 plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
/* Program txschq registers for every node of @tree, iterating hardware
 * levels from 0 up to nix->tm_root_lvl so lower levels are configured
 * before the root.
 */
98 nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
100 struct nix_tm_node_list *list;
101 struct nix_tm_node *node;
105 list = nix_tm_node_list(nix, tree);
107 for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
108 TAILQ_FOREACH(node, list, node) {
/* Only nodes belonging to the current level this pass */
109 if (node->hw_lvl != hw_lvl)
111 rc = nix_tm_node_reg_conf(nix, node);
/* Refresh per-parent scheduling info (round-robin priority, RR group
 * size, max child priority) for @tree after topology changes.  HW
 * resources of children whose parent is marked dirty (child_realloc)
 * are released locally first so they can be re-assigned.
 */
121 nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
123 struct nix_tm_node *child, *parent;
124 struct nix_tm_node_list *list;
125 uint32_t rr_prio, max_prio;
128 list = nix_tm_node_list(nix, tree);
130 /* Release all the node hw resources locally
131 * if parent marked as dirty and resource exists.
133 TAILQ_FOREACH(child, list, node) {
134 /* Release resource only if parent direct hierarchy changed */
135 if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
136 child->parent->child_realloc) {
137 nix_tm_free_node_resource(nix, child);
/* UINT32_MAX == "no priority info yet"; recomputed below */
139 child->max_prio = UINT32_MAX;
142 TAILQ_FOREACH(parent, list, node) {
143 /* Count group of children of same priority i.e are RR */
144 rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
147 /* Assuming that multiple RR groups are
148 * not configured based on capability.
150 parent->rr_prio = rr_prio;
151 parent->rr_num = rr_num;
152 parent->max_prio = max_prio;
/* Validate and insert a TM node into its tree's node list.
 *
 * Checks: tree id range, sw->hw level mapping, leaf priority must be 0,
 * parent existence at the expected next level, duplicate node id,
 * shaper profile validity and packet-mode match for non-leaf nodes,
 * sibling priority/DWRR consistency, and weight upper bound.
 * Returns 0 on success or a NIX_ERR_TM_* error code.
 */
159 nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
161 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
162 struct nix_tm_shaper_profile *profile;
163 uint32_t node_id, parent_id, lvl;
164 struct nix_tm_node *parent_node;
165 uint32_t priority, profile_id;
166 uint8_t hw_lvl, exp_next_lvl;
167 enum roc_nix_tm_tree tree;
171 priority = node->priority;
172 parent_id = node->parent_id;
173 profile_id = node->shaper_profile_id;
177 plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
178 "parent %u profile 0x%x tree %u",
179 nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
180 priority, node->weight, parent_id, profile_id, tree);
182 if (tree >= ROC_NIX_TM_TREE_MAX)
183 return NIX_ERR_PARAM;
185 /* Translate sw level id's to nix hw level id's */
186 hw_lvl = nix_tm_lvl2nix(nix, lvl);
187 if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
188 return NIX_ERR_TM_INVALID_LVL;
190 /* Leaf nodes have to be same priority */
191 if (nix_tm_is_leaf(nix, lvl) && priority != 0)
192 return NIX_ERR_TM_INVALID_PRIO;
194 parent_node = nix_tm_node_search(nix, parent_id, tree);
/* node ids below nb_tx_queues are leaves (SQs); they attach to an SMQ */
196 if (node_id < nix->nb_tx_queues)
197 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
199 exp_next_lvl = hw_lvl + 1;
201 /* Check if there is no parent node yet */
202 if (hw_lvl != nix->tm_root_lvl &&
203 (!parent_node || parent_node->hw_lvl != exp_next_lvl))
204 return NIX_ERR_TM_INVALID_PARENT;
206 /* Check if a node already exists */
207 if (nix_tm_node_search(nix, node_id, tree))
208 return NIX_ERR_TM_NODE_EXISTS;
210 profile = nix_tm_shaper_profile_search(nix, profile_id);
211 if (!nix_tm_is_leaf(nix, lvl)) {
212 /* Check if shaper profile exists for non leaf node */
213 if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
214 return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
216 /* Packet mode in profile should match with that of tm node */
217 if (profile && profile->pkt_mode != node->pkt_mode)
218 return NIX_ERR_TM_PKT_MODE_MISMATCH;
221 /* Check if there is second DWRR already in siblings or holes in prio */
222 rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
226 if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
227 return NIX_ERR_TM_WEIGHT_EXCEED;
229 /* Maintain minimum weight */
233 node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
/* No priority info / HW resource until resources are assigned */
235 node->max_prio = UINT32_MAX;
236 node->hw_id = NIX_TM_HW_ID_INVALID;
242 node->parent = parent_node;
/* Parent's children changed; its HW child layout must be rebuilt */
244 parent_node->child_realloc = true;
245 node->parent_hw_id = NIX_TM_HW_ID_INVALID;
247 TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
248 plt_tm_dbg("Added node %s lvl %u id %u (%p)",
249 nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
/* Clear SW XOFF on every ancestor of @node that holds a HW resource but
 * is not yet enabled, so a subsequent flush can make progress.  Each
 * node touched is marked NIX_TM_NODE_ENABLED.
 */
254 nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
256 struct mbox *mbox = (&nix->dev)->mbox;
257 struct nix_txschq_config *req;
258 struct nix_tm_node *p;
261 /* Enable nodes in path for flush to succeed */
262 if (!nix_tm_is_leaf(nix, node->lvl))
267 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
268 (p->flags & NIX_TM_NODE_HWRES)) {
269 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
270 req->lvl = p->hw_lvl;
/* false => clear (not set) the SW XOFF registers for this node */
271 req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
273 rc = mbox_process(mbox);
277 p->flags |= NIX_TM_NODE_ENABLED;
/* Set (@enable=true) or clear XOFF/FLUSH on the SMQ backing @node,
 * after first clearing SW XOFF along the node's path.
 * NOTE(review): BIT(50)/BIT(49) of NIX_AF_SMQX_CFG are presumably the
 * FLUSH and XOFF controls — confirm against the NIX HRM.
 */
286 nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
288 struct mbox *mbox = (&nix->dev)->mbox;
289 struct nix_txschq_config *req;
294 plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
295 enable ? "enable" : "disable");
297 rc = nix_tm_clear_path_xoff(nix, node);
301 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
302 req->lvl = NIX_TXSCH_LVL_SMQ;
305 req->reg[0] = NIX_AF_SMQX_CFG(smq);
306 req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
/* Mask selects which bits the AF updates: both on enable, one on disable */
307 req->regval_mask[0] =
308 enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
310 return mbox_process(mbox);
/* Look up the leaf node for SQ @sq in the active tree and return its
 * parent SMQ id via @smq and the RR quantum derived from the node
 * weight via @rr_quantum.  Also clears SMQ xoff and marks the leaf
 * enabled.
 */
314 nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
317 struct nix_tm_node *node;
320 node = nix_tm_node_search(nix, sq, nix->tm_tree);
322 /* Check if we found a valid leaf node */
323 if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
324 node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
328 /* Get SMQ Id of leaf node's parent */
329 *smq = node->parent->hw_id;
330 *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
/* false => clear xoff, i.e. un-gate the parent SMQ */
332 rc = nix_tm_smq_xoff(nix, node->parent, false);
335 node->flags |= NIX_TM_NODE_ENABLED;
/* Spin-wait until SQ @sq reaches a quiescent state: at most one SQB in
 * use, head == tail, and all SQB buffers returned (fc == nb_sqb_bufs).
 * The timeout is sized for the worst case of this SQ draining last at
 * the minimum shaper rate across all TX queues; on timeout the TM tree
 * and queue contexts are dumped for debugging.
 */
340 roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
342 struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
343 uint16_t sqb_cnt, head_off, tail_off;
344 uint64_t wdata, val, prev;
345 uint16_t qid = sq->qid;
347 uint64_t timeout; /* 10's of usec */
349 /* Wait for enough time based on shaper min rate */
350 timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
351 /* Wait for worst case scenario of this SQ being last priority
352 * and so have to wait for all other SQ's drain out by their own.
354 timeout = timeout * nix->nb_tx_queues;
355 timeout = timeout / nix->tm_rate_min;
/* SQ_OP_STATUS atomic read: queue id is carried in wdata bits [63:32] */
359 wdata = ((uint64_t)qid << 32);
360 regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
361 val = roc_atomic64_add_nosync(wdata, regaddr);
363 /* Spin multiple iterations as "sq->fc_cache_pkts" can still
364 * have space to send pkts even though fc_mem is disabled
370 val = roc_atomic64_add_nosync(wdata, regaddr);
371 /* Continue on error */
372 if (val & BIT_ULL(63))
/* Decode status word: SQB count and ring head/tail offsets */
378 sqb_cnt = val & 0xFFFF;
379 head_off = (val >> 20) & 0x3F;
380 tail_off = (val >> 28) & 0x3F;
382 /* SQ reached quiescent state */
383 if (sqb_cnt <= 1 && head_off == tail_off &&
384 (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
/* Timed out: dump state to aid debugging the stuck queue */
396 roc_nix_tm_dump(sq->roc_nix);
397 roc_nix_queues_ctx_dump(sq->roc_nix);
401 /* Flush and disable tx queue and its parent SMQ */
/* Sequence (per HRM, siblings feeding the same SMQ must be paused
 * before the SMQ is flushed):
 *   1. ensure CGX RX/TX is running so packets can drain,
 *   2. clear SMQ xoff, pause sibling SQs,
 *   3. disable this SQ's aura flow control and spin until drained,
 *   4. set SMQ xoff (disable + flush), restore CGX state.
 */
403 nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
405 struct roc_nix *roc_nix = sq->roc_nix;
406 struct nix_tm_node *node, *sibling;
407 struct nix_tm_node_list *list;
408 enum roc_nix_tm_tree tree;
414 nix = roc_nix_to_nix_priv(roc_nix);
416 /* Need not do anything if tree is in disabled state */
417 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
420 mbox = (&nix->dev)->mbox;
424 list = nix_tm_node_list(nix, tree);
426 /* Find the node for this SQ */
427 node = nix_tm_node_search(nix, qid, tree);
428 if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
429 plt_err("Invalid node/state for sq %u", qid);
433 /* Enable CGX RXTX to drain pkts */
434 if (!roc_nix->io_enabled) {
435 /* Though it enables both RX MCAM Entries and CGX Link
436 * we assume all the rx queues are stopped way back.
438 mbox_alloc_msg_nix_lf_start_rx(mbox);
439 rc = mbox_process(mbox);
441 plt_err("cgx start failed, rc=%d", rc);
446 /* Disable smq xoff for case it was enabled earlier */
447 rc = nix_tm_smq_xoff(nix, node->parent, false);
449 plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
454 /* As per HRM, to disable an SQ, all other SQ's
455 * that feed to same SMQ must be paused before SMQ flush.
457 TAILQ_FOREACH(sibling, list, node) {
458 if (sibling->parent != node->parent)
460 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
468 rc = roc_nix_tm_sq_aura_fc(sq, false);
470 plt_err("Failed to disable sqb aura fc, rc=%d", rc);
474 /* Wait for sq entries to be flushed */
475 rc = roc_nix_tm_sq_flush_spin(sq);
477 plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
482 node->flags &= ~NIX_TM_NODE_ENABLED;
484 /* Disable and flush */
485 rc = nix_tm_smq_xoff(nix, node->parent, true);
487 plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
492 /* Restore cgx state */
493 if (!roc_nix->io_enabled) {
494 mbox_alloc_msg_nix_lf_stop_rx(mbox);
495 rc |= mbox_process(mbox);
/* Undo the pause done by nix_tm_sq_flush_pre(): re-enable sibling SQs
 * feeding the same SMQ (clearing SMQ xoff and restoring each sibling's
 * SQB aura flow control) once the flushed SQ is gone.
 */
502 nix_tm_sq_flush_post(struct roc_nix_sq *sq)
504 struct roc_nix *roc_nix = sq->roc_nix;
505 struct nix_tm_node *node, *sibling;
506 struct nix_tm_node_list *list;
507 enum roc_nix_tm_tree tree;
508 struct roc_nix_sq *s_sq;
514 nix = roc_nix_to_nix_priv(roc_nix);
516 /* Need not do anything if tree is in disabled state */
517 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
522 list = nix_tm_node_list(nix, tree);
524 /* Find the node for this SQ */
525 node = nix_tm_node_search(nix, qid, tree);
527 plt_err("Invalid node for sq %u", qid);
531 /* Enable all the siblings back */
532 TAILQ_FOREACH(sibling, list, node) {
533 if (sibling->parent != node->parent)
/* Skip the SQ being flushed itself */
536 if (sibling->id == qid)
539 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
543 s_sq = nix->sqs[s_qid];
548 /* Enable back if any SQ is still present */
549 rc = nix_tm_smq_xoff(nix, node->parent, false);
551 plt_err("Failed to enable smq %u, rc=%d",
552 node->parent->hw_id, rc);
558 rc = roc_nix_tm_sq_aura_fc(s_sq, true);
560 plt_err("Failed to enable sqb aura fc, rc=%d", rc);
/* Rewrite the SQ context's SMQ mapping and/or RR weight through the AQ
 * mailbox.  When @rr_quantum_only is set, only the weight field is
 * updated.  cn9k and cn10k use different AQ request layouts and field
 * names (smq_rr_quantum vs smq_rr_weight).
 */
569 nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
570 bool rr_quantum_only)
572 struct mbox *mbox = (&nix->dev)->mbox;
573 uint16_t qid = node->id, smq;
577 smq = node->parent->hw_id;
578 rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
581 plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
584 plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
585 qid, smq, rr_quantum);
587 if (qid > nix->nb_tx_queues)
590 if (roc_model_is_cn9k()) {
591 struct nix_aq_enq_req *aq;
593 aq = mbox_alloc_msg_nix_aq_enq(mbox);
595 aq->ctype = NIX_AQ_CTYPE_SQ;
596 aq->op = NIX_AQ_INSTOP_WRITE;
598 /* smq update only when needed */
599 if (!rr_quantum_only) {
/* All-ones mask => AF updates every bit of the field */
601 aq->sq_mask.smq = ~aq->sq_mask.smq;
603 aq->sq.smq_rr_quantum = rr_quantum;
604 aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
606 struct nix_cn10k_aq_enq_req *aq;
608 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
610 aq->ctype = NIX_AQ_CTYPE_SQ;
611 aq->op = NIX_AQ_INSTOP_WRITE;
613 /* smq update only when needed */
614 if (!rr_quantum_only) {
616 aq->sq_mask.smq = ~aq->sq_mask.smq;
618 aq->sq.smq_rr_weight = rr_quantum;
619 aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
622 rc = mbox_process(mbox);
624 plt_err("Failed to set smq, rc=%d", rc);
/* Return schqs of level @hw_lvl back to the AF — only the amount above
 * the reserved threshold for that pool (contiguous or discontiguous,
 * per @contig).  Each released schq is cleared from the local bitmap.
 */
629 nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
632 uint16_t avail, thresh, to_free = 0, schq;
633 struct mbox *mbox = (&nix->dev)->mbox;
634 struct nix_txsch_free_req *req;
635 struct plt_bitmap *bmp;
640 bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
642 contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
643 plt_bitmap_scan_init(bmp);
645 avail = nix_tm_resource_avail(nix, hw_lvl, contig);
648 /* Release only above threshold */
650 to_free = avail - thresh;
652 /* Release everything */
656 /* Now release resources to AF */
/* Reuse the previous slab until exhausted, then scan for the next */
658 if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
661 schq = bitmap_ctzll(slab);
662 slab &= ~(1ULL << schq);
666 req = mbox_alloc_msg_nix_txsch_free(mbox);
670 req->schq_lvl = hw_lvl;
672 rc = mbox_process(mbox);
674 plt_err("failed to release hwres %s(%u) rc %d",
675 nix_tm_hwlvl2str(hw_lvl), schq, rc);
679 plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
681 plt_bitmap_clear(bmp, schq);
/* Loop ended before freeing the requested count — bookkeeping is off */
686 plt_err("resource inconsistency for %s(%u)",
687 nix_tm_hwlvl2str(hw_lvl), contig);
/* Free the HW schq held by @node: cache it back into the local
 * discontiguous bitmap while that pool is below its reserved watermark,
 * otherwise release it to the AF via a txsch free mailbox.  Marks the
 * parent for child reallocation and invalidates the node's hw_id.
 */
694 nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
696 struct mbox *mbox = (&nix->dev)->mbox;
697 struct nix_txsch_free_req *req;
698 struct plt_bitmap *bmp;
699 uint16_t avail, hw_id;
703 hw_lvl = node->hw_lvl;
705 bmp = nix->schq_bmp[hw_lvl];
706 /* Free specific HW resource */
707 plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
708 nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
711 avail = nix_tm_resource_avail(nix, hw_lvl, false);
712 /* Always for now free to discontiguous queue when avail
715 if (nix->discontig_rsvd[hw_lvl] &&
716 avail < nix->discontig_rsvd[hw_lvl]) {
717 PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
/* Must not already be marked free in the bitmap */
718 PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
719 plt_bitmap_set(bmp, hw_id);
720 node->hw_id = NIX_TM_HW_ID_INVALID;
721 node->flags &= ~NIX_TM_NODE_HWRES;
726 req = mbox_alloc_msg_nix_txsch_free(mbox);
730 req->schq_lvl = node->hw_lvl;
732 rc = mbox_process(mbox);
734 plt_err("failed to release hwres %s(%u) rc %d",
735 nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
739 /* Mark parent as dirty for reallocing it's children */
741 node->parent->child_realloc = true;
743 node->hw_id = NIX_TM_HW_ID_INVALID;
744 node->flags &= ~NIX_TM_NODE_HWRES;
745 plt_tm_dbg("Released hwres %s(%u) to af",
746 nix_tm_hwlvl2str(node->hw_lvl), hw_id);
/* Delete node @node_id from @tree.  Refuses while children exist,
 * drops the shaper profile reference, frees any HW resource the node
 * holds, unlinks it from the tree list and (when @free) releases the
 * node's memory.
 */
751 nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
752 enum roc_nix_tm_tree tree, bool free)
754 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
755 struct nix_tm_shaper_profile *profile;
756 struct nix_tm_node *node, *child;
757 struct nix_tm_node_list *list;
761 plt_tm_dbg("Delete node id %u tree %u", node_id, tree);
763 node = nix_tm_node_search(nix, node_id, tree);
765 return NIX_ERR_TM_INVALID_NODE;
767 list = nix_tm_node_list(nix, tree);
768 /* Check for any existing children */
769 TAILQ_FOREACH(child, list, node) {
770 if (child->parent == node)
771 return NIX_ERR_TM_CHILD_EXISTS;
774 /* Remove shaper profile reference */
775 profile_id = node->shaper_profile_id;
776 profile = nix_tm_shaper_profile_search(nix, profile_id);
778 /* Free hw resource locally */
779 if (node->flags & NIX_TM_NODE_HWRES) {
780 rc = nix_tm_free_node_resource(nix, node);
788 TAILQ_REMOVE(list, node, node);
790 plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
791 "parent %u profile 0x%x tree %u (%p)",
792 nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
793 node->priority, node->weight,
794 node->parent ? node->parent->id : UINT32_MAX,
795 node->shaper_profile_id, tree, node);
796 /* Free only if requested */
798 nix_tm_node_free(node);
/* Hand out HW schq ids to all children of @parent.  Strict-priority
 * children take contiguous ids offset by their priority from
 * @contig_id; the round-robin group (priority == parent->rr_prio)
 * takes discontiguous schqs, except that in a mixed SP+RR layout the
 * spare contiguous slot at rr_prio is consumed first.
 * @contig_cnt is decremented per contiguous id consumed.
 */
803 nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
804 uint16_t *contig_id, int *contig_cnt,
805 struct nix_tm_node_list *list)
807 struct nix_tm_node *child;
808 struct plt_bitmap *bmp;
809 uint8_t child_hw_lvl;
815 child_hw_lvl = parent->hw_lvl - 1;
816 bmp = nix->schq_bmp[child_hw_lvl];
817 plt_bitmap_scan_init(bmp);
820 /* Save spare schq if it is case of RR + SP */
821 if (parent->rr_prio != 0xf && *contig_cnt > 1)
822 spare_schq = *contig_id + parent->rr_prio;
824 TAILQ_FOREACH(child, list, node) {
/* Only this parent's children */
827 if (child->parent->id != parent->id)
830 /* Resource never expected to be present */
831 if (child->flags & NIX_TM_NODE_HWRES) {
832 plt_err("Resource exists for child (%s)%u, id %u (%p)",
833 nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
839 plt_bitmap_scan(bmp, &pos, &slab);
841 if (child->priority == parent->rr_prio && spare_schq != -1) {
842 /* Use spare schq first if present */
845 *contig_cnt = *contig_cnt - 1;
847 } else if (child->priority == parent->rr_prio) {
848 /* Assign a discontiguous queue */
850 plt_err("Schq not found for Child %u "
852 child->id, child->lvl, child);
856 schq = bitmap_ctzll(slab);
857 slab &= ~(1ULL << schq);
859 plt_bitmap_clear(bmp, schq);
861 /* Assign a contiguous queue */
862 schq = *contig_id + child->priority;
863 *contig_cnt = *contig_cnt - 1;
866 plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
867 nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
871 child->parent_hw_id = parent->hw_id;
872 child->flags |= NIX_TM_NODE_HWRES;
/* Assign HW schq ids for the whole @tree.  Walks parents top-down from
 * TL1: for each dirty parent, pre-reserves a contiguous run of
 * max_prio+1 schqs from the contiguous bitmap and hands ids to its
 * children via nix_tm_assign_hw_id().  The root's own id is taken from
 * the discontiguous pool at the end, plus a TL1 parent id when this LF
 * lacks TL1 access (root at TL2).
 */
879 nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
881 struct nix_tm_node *parent, *root = NULL;
882 struct plt_bitmap *bmp, *bmp_contig;
883 struct nix_tm_node_list *list;
884 uint8_t child_hw_lvl, hw_lvl;
885 uint16_t contig_id, j;
890 list = nix_tm_node_list(nix, tree);
891 /* Walk from TL1 to TL4 parents */
892 for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
893 TAILQ_FOREACH(parent, list, node) {
894 child_hw_lvl = parent->hw_lvl - 1;
895 if (parent->hw_lvl != hw_lvl)
898 /* Remember root for future */
899 if (parent->hw_lvl == nix->tm_root_lvl)
902 if (!parent->child_realloc) {
903 /* Skip when parent is not dirty */
904 if (nix_tm_child_res_valid(list, parent))
906 plt_err("Parent not dirty but invalid "
907 "child res parent id %u(lvl %u)",
908 parent->id, parent->lvl);
912 bmp_contig = nix->schq_contig_bmp[child_hw_lvl];
914 /* Prealloc contiguous indices for a parent */
915 contig_id = NIX_TM_MAX_HW_TXSCHQ;
/* One contiguous schq per priority 0..max_prio */
916 cnt = (int)parent->max_prio + 1;
918 plt_bitmap_scan_init(bmp_contig);
919 if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
920 plt_err("Contig schq not found");
923 contig_id = pos + bitmap_ctzll(slab);
925 /* Check if we have enough */
926 for (j = contig_id; j < contig_id + cnt; j++) {
927 if (!plt_bitmap_get(bmp_contig, j))
931 if (j != contig_id + cnt) {
932 plt_err("Contig schq not sufficient");
936 for (j = contig_id; j < contig_id + cnt; j++)
937 plt_bitmap_clear(bmp_contig, j);
940 /* Assign hw id to all children */
941 rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
944 plt_err("Unexpected err, contig res alloc, "
945 "parent %u, of %s, rc=%d, cnt=%d",
946 parent->id, nix_tm_hwlvl2str(hw_lvl),
951 /* Clear the dirty bit as children's
952 * resources are reallocated.
954 parent->child_realloc = false;
958 /* Root is always expected to be there */
/* Root already holding a resource needs no new assignment */
962 if (root->flags & NIX_TM_NODE_HWRES)
965 /* Process root node */
966 bmp = nix->schq_bmp[nix->tm_root_lvl];
967 plt_bitmap_scan_init(bmp);
968 if (!plt_bitmap_scan(bmp, &pos, &slab)) {
969 plt_err("Resource not allocated for root");
973 root->hw_id = pos + bitmap_ctzll(slab);
974 root->flags |= NIX_TM_NODE_HWRES;
975 plt_bitmap_clear(bmp, root->hw_id);
977 /* Get TL1 id as well when root is not TL1 */
978 if (!nix_tm_have_tl1_access(nix)) {
979 bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];
981 plt_bitmap_scan_init(bmp);
982 if (!plt_bitmap_scan(bmp, &pos, &slab)) {
983 plt_err("Resource not found for TL1");
986 root->parent_hw_id = pos + bitmap_ctzll(slab);
987 plt_bitmap_clear(bmp, root->parent_hw_id);
990 plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
991 nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);
/* Record the AF-allocated schq ids from the txsch alloc response into
 * the per-level discontiguous and contiguous bitmaps.
 */
997 nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
1002 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1003 for (i = 0; i < rsp->schq[lvl]; i++)
1004 plt_bitmap_set(nix->schq_bmp[lvl],
1005 rsp->schq_list[lvl][i]);
1007 for (i = 0; i < rsp->schq_contig[lvl]; i++)
1008 plt_bitmap_set(nix->schq_contig_bmp[lvl],
1009 rsp->schq_contig_list[lvl][i]);
/* Estimate and allocate txschq resources for @tree from the AF.
 * Existing contiguous resources are released first (continuity with
 * new ones cannot be guaranteed), then requests are issued in chunks
 * of at most MAX_TXSCHQ_PER_FUNC per level until the estimate is met.
 * On failure, everything tracked so far is released back to the AF.
 */
1014 nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
1016 uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
1017 struct mbox *mbox = (&nix->dev)->mbox;
1018 uint16_t schq[NIX_TXSCH_LVL_CNT];
1019 struct nix_txsch_alloc_req *req;
1020 struct nix_txsch_alloc_rsp *rsp;
1025 memset(schq, 0, sizeof(schq));
1026 memset(schq_contig, 0, sizeof(schq_contig));
1028 /* Estimate requirement */
1029 rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
1033 /* Release existing contiguous resources when realloc requested
1034 * as there is no way to guarantee continuity of old with new.
1036 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1037 if (schq_contig[hw_lvl])
1038 nix_tm_release_resources(nix, hw_lvl, true, false);
1041 /* Alloc as needed */
1044 req = mbox_alloc_msg_nix_txsch_alloc(mbox);
1049 mbox_memcpy(req->schq, schq, sizeof(req->schq));
1050 mbox_memcpy(req->schq_contig, schq_contig,
1051 sizeof(req->schq_contig));
1053 /* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
1054 * So split alloc to multiple requests.
1056 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1057 if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
1058 req->schq[i] = MAX_TXSCHQ_PER_FUNC;
/* Subtract what this request will cover from the outstanding need */
1059 schq[i] -= req->schq[i];
1061 if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
1062 req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
1063 schq_contig[i] -= req->schq_contig[i];
1065 if (schq[i] || schq_contig[i])
1069 rc = mbox_process_msg(mbox, (void *)&rsp);
1073 nix_tm_copy_rsp_to_nix(nix, rsp);
1076 nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
/* Error path: give back both pools at every level */
1079 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1080 if (nix_tm_release_resources(nix, i, true, false))
1081 plt_err("Failed to release contig resources of "
1084 if (nix_tm_release_resources(nix, i, false, false))
1085 plt_err("Failed to release discontig resources of "
/* Build the default TM tree: a single non-leaf chain from ROOT down to
 * SCH3/SCH4 (one extra level with TL1 access), then one leaf node per
 * TX queue, all with the default RR weight and no shaper profile.
 * On error, the last allocated-but-unadded node is freed.
 */
1093 nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
1095 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1096 uint32_t nonleaf_id = nix->nb_tx_queues;
1097 struct nix_tm_node *node = NULL;
1098 uint8_t leaf_lvl, lvl, lvl_end;
1102 /* Add ROOT, SCH1, SCH2, SCH3, [SCH4] nodes */
1103 parent = ROC_NIX_TM_NODE_ID_INVALID;
1104 /* With TL1 access we have an extra level */
1105 lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
1108 for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1110 node = nix_tm_node_alloc();
1114 node->id = nonleaf_id;
1115 node->parent_id = parent;
1117 node->weight = NIX_TM_DFLT_RR_WT;
1118 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1120 node->tree = ROC_NIX_TM_DEFAULT;
1122 rc = nix_tm_node_add(roc_nix, node);
/* This node becomes the parent of the next level */
1125 parent = nonleaf_id;
1129 parent = nonleaf_id - 1;
1130 leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1133 /* Add leaf nodes */
1134 for (i = 0; i < nix->nb_tx_queues; i++) {
1136 node = nix_tm_node_alloc();
1141 node->parent_id = parent;
1143 node->weight = NIX_TM_DFLT_RR_WT;
1144 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1145 node->lvl = leaf_lvl;
1146 node->tree = ROC_NIX_TM_DEFAULT;
1148 rc = nix_tm_node_add(roc_nix, node);
1155 nix_tm_node_free(node);
/* Build the rate-limit TM tree: a short non-leaf chain (ROOT..SCH3 or
 * SCH2), then one SMQ-level node per TX queue so each queue can be
 * shaped independently, then one leaf per queue.
 * On error, the last allocated-but-unadded node is freed.
 */
1160 nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
1162 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1163 uint32_t nonleaf_id = nix->nb_tx_queues;
1164 struct nix_tm_node *node = NULL;
1165 uint8_t leaf_lvl, lvl, lvl_end;
1169 /* Add ROOT, SCH1, SCH2 nodes */
1170 parent = ROC_NIX_TM_NODE_ID_INVALID;
1171 lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
1174 for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1176 node = nix_tm_node_alloc();
1180 node->id = nonleaf_id;
1181 node->parent_id = parent;
1183 node->weight = NIX_TM_DFLT_RR_WT;
1184 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1186 node->tree = ROC_NIX_TM_RLIMIT;
1188 rc = nix_tm_node_add(roc_nix, node);
1191 parent = nonleaf_id;
1195 /* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
1196 lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);
1198 /* Add per queue SMQ nodes i.e SCH4 / SCH3 */
1199 for (i = 0; i < nix->nb_tx_queues; i++) {
1201 node = nix_tm_node_alloc();
/* One SMQ node per queue, ids following the non-leaf chain */
1205 node->id = nonleaf_id + i;
1206 node->parent_id = parent;
1208 node->weight = NIX_TM_DFLT_RR_WT;
1209 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1211 node->tree = ROC_NIX_TM_RLIMIT;
1213 rc = nix_tm_node_add(roc_nix, node);
1218 parent = nonleaf_id;
1219 leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1222 /* Add leaf nodes */
1223 for (i = 0; i < nix->nb_tx_queues; i++) {
1225 node = nix_tm_node_alloc();
1230 node->parent_id = parent;
1232 node->weight = NIX_TM_DFLT_RR_WT;
1233 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1234 node->lvl = leaf_lvl;
1235 node->tree = ROC_NIX_TM_RLIMIT;
1237 rc = nix_tm_node_add(roc_nix, node);
1244 nix_tm_node_free(node);
/* Free resources of every tree selected by @tree_mask: first release
 * the HW resources of non-leaf nodes (clearing path xoff so flush can
 * succeed), then — unless @hw_only — drop shaper profile references
 * and free the software node objects as well.
 */
1249 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
1251 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1252 struct nix_tm_shaper_profile *profile;
1253 struct nix_tm_node *node, *next_node;
1254 struct nix_tm_node_list *list;
1255 enum roc_nix_tm_tree tree;
1256 uint32_t profile_id;
1259 for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
1260 if (!(tree_mask & BIT(tree)))
1263 plt_tm_dbg("Freeing resources of tree %u", tree);
1265 list = nix_tm_node_list(nix, tree);
/* Save the successor before the node may be removed */
1266 next_node = TAILQ_FIRST(list);
1269 next_node = TAILQ_NEXT(node, node);
1271 if (!nix_tm_is_leaf(nix, node->lvl) &&
1272 node->flags & NIX_TM_NODE_HWRES) {
1273 /* Clear xoff in path for flush to succeed */
1274 rc = nix_tm_clear_path_xoff(nix, node);
1277 rc = nix_tm_free_node_resource(nix, node);
1283 /* Leave software elements if needed */
1287 next_node = TAILQ_FIRST(list);
1290 next_node = TAILQ_NEXT(node, node);
1292 plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
1295 profile_id = node->shaper_profile_id;
1296 profile = nix_tm_shaper_profile_search(nix, profile_id);
1300 TAILQ_REMOVE(list, node, node);
1301 nix_tm_node_free(node);
/* One-time TM state init for an LF: node/profile lists, minimum rate,
 * and per-level contiguous + discontiguous schq bitmaps carved from a
 * single zeroed allocation.  PFs get TL1 access (root at TL1); VFs
 * root at TL2.  TL1 static priority is disabled when VFs may exist, to
 * avoid runtime TL2 reallocation.  Cleans up via nix_tm_conf_fini() on
 * failure.
 */
1308 nix_tm_conf_init(struct roc_nix *roc_nix)
1310 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1311 uint32_t bmp_sz, hw_lvl;
/* Public opaque sizes must be able to hold the private structs */
1315 PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
1316 PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
1317 ROC_NIX_TM_SHAPER_PROFILE_SZ);
1320 for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
1321 TAILQ_INIT(&nix->trees[i]);
1323 TAILQ_INIT(&nix->shaper_profile_list);
1324 nix->tm_rate_min = 1E9; /* 1Gbps */
/* One footprint per bitmap: 2 bitmaps (contig + discontig) per level */
1327 bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
1328 bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
1331 nix->schq_bmp_mem = bmp_mem;
1333 /* Init contiguous and discontiguous bitmap per lvl */
1335 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1336 /* Bitmap for discontiguous resource */
1337 nix->schq_bmp[hw_lvl] =
1338 plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1339 if (!nix->schq_bmp[hw_lvl])
1342 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1344 /* Bitmap for contiguous resource */
1345 nix->schq_contig_bmp[hw_lvl] =
1346 plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1347 if (!nix->schq_contig_bmp[hw_lvl])
1350 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1353 /* Disable TL1 Static Priority when VF's are enabled
1354 * as otherwise VF's TL2 reallocation will be needed
1355 * runtime to support a specific topology of PF.
1357 if (nix->pci_dev->max_vfs)
1358 nix->tm_flags |= NIX_TM_TL1_NO_SP;
1360 /* TL1 access is only for PF's */
1361 if (roc_nix_is_pf(roc_nix)) {
1362 nix->tm_flags |= NIX_TM_TL1_ACCESS;
1363 nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
1365 nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
/* Error path: release whatever bitmap state was set up */
1370 nix_tm_conf_fini(roc_nix);
/* Tear down the per-level bitmaps and backing memory allocated by
 * nix_tm_conf_init().
 */
1375 nix_tm_conf_fini(struct roc_nix *roc_nix)
1377 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1380 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1381 plt_bitmap_free(nix->schq_bmp[hw_lvl]);
1382 plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
1384 plt_free(nix->schq_bmp_mem);