common/cnxk: support cn9k fast path security session
[dpdk.git] / drivers / common / cnxk / roc_nix_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "roc_api.h"
6 #include "roc_priv.h"
7
static inline int
bitmap_ctzll(uint64_t slab)
{
	/* Index of lowest set bit; an empty slab maps to bit 0
	 * (the builtin is undefined for a zero argument).
	 */
	return (slab == 0) ? 0 : __builtin_ctzll(slab);
}
16
17 void
18 nix_tm_clear_shaper_profiles(struct nix *nix)
19 {
20         struct nix_tm_shaper_profile *shaper_profile;
21
22         shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
23         while (shaper_profile != NULL) {
24                 if (shaper_profile->ref_cnt)
25                         plt_warn("Shaper profile %u has non zero references",
26                                  shaper_profile->id);
27                 TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
28                 nix_tm_shaper_profile_free(shaper_profile);
29                 shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
30         }
31 }
32
/* Program one TM node's topology, schedule and shaping registers in the
 * AF via a single txschq config mbox message. When the PF lacks TL1
 * access, a default TL1 configuration is pushed first while configuring
 * a TL2 node. Returns 0 on success, negative error code otherwise.
 */
static int
nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
{
	uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
	uint64_t regval[MAX_REGS_PER_MBOX_MSG];
	struct nix_tm_shaper_profile *profile;
	uint64_t reg[MAX_REGS_PER_MBOX_MSG];
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	int rc = -EFAULT;
	uint32_t hw_lvl;
	uint8_t k = 0;

	memset(regval, 0, sizeof(regval));
	memset(regval_mask, 0, sizeof(regval_mask));

	/* May be NULL when node uses ROC_NIX_TM_SHAPER_PROFILE_NONE */
	profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
	hw_lvl = node->hw_lvl;

	/* Need this trigger to configure TL1 */
	if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
		/* Prepare default conf for TL1 */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = NIX_TXSCH_LVL_TL1;

		k = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,
					    req->regval);
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			goto error;
	}

	/* Prepare topology config */
	k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);

	/* Prepare schedule config */
	k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);

	/* Prepare shaping config */
	k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);

	/* Nothing to program for this node */
	if (!k)
		return 0;

	/* Copy and send config mbox */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = hw_lvl;
	req->num_regs = k;

	mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
	mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
	mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

	rc = mbox_process(mbox);
	if (rc)
		goto error;

	return 0;
error:
	plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
	return rc;
}
96
97 int
98 nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
99 {
100         struct nix_tm_node_list *list;
101         struct nix_tm_node *node;
102         uint32_t hw_lvl;
103         int rc = 0;
104
105         list = nix_tm_node_list(nix, tree);
106
107         for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
108                 TAILQ_FOREACH(node, list, node) {
109                         if (node->hw_lvl != hw_lvl)
110                                 continue;
111                         rc = nix_tm_node_reg_conf(nix, node);
112                         if (rc)
113                                 goto exit;
114                 }
115         }
116 exit:
117         return rc;
118 }
119
/* Refresh per-parent scheduling info (RR group size/priority, max
 * priority) for every node in the tree, after first releasing child HW
 * resources whose parent is marked for child reallocation. Returns 0.
 */
int
nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child, *parent;
	struct nix_tm_node_list *list;
	uint32_t rr_prio, max_prio;
	uint32_t rr_num = 0;

	list = nix_tm_node_list(nix, tree);

	/* Release all the node hw resources locally
	 * if parent marked as dirty and resource exists.
	 */
	TAILQ_FOREACH(child, list, node) {
		/* Release resource only if parent direct hierarchy changed */
		if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
		    child->parent->child_realloc) {
			nix_tm_free_node_resource(nix, child);
		}
		/* Reset; recomputed in the pass below via nix_tm_check_rr() */
		child->max_prio = UINT32_MAX;
	}

	TAILQ_FOREACH(parent, list, node) {
		/* Count group of children of same priority i.e are RR */
		rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
					 &max_prio);

		/* Assuming that multiple RR groups are
		 * not configured based on capability.
		 */
		parent->rr_prio = rr_prio;
		parent->rr_num = rr_num;
		parent->max_prio = max_prio;
	}

	return 0;
}
157
158 static int
159 nix_tm_root_node_get(struct nix *nix, int tree)
160 {
161         struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
162         struct nix_tm_node *tm_node;
163
164         TAILQ_FOREACH(tm_node, list, node) {
165                 if (tm_node->hw_lvl == nix->tm_root_lvl)
166                         return 1;
167         }
168
169         return 0;
170 }
171
172 int
173 nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
174 {
175         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
176         struct nix_tm_shaper_profile *profile;
177         uint32_t node_id, parent_id, lvl;
178         struct nix_tm_node *parent_node;
179         uint32_t priority, profile_id;
180         uint8_t hw_lvl, exp_next_lvl;
181         enum roc_nix_tm_tree tree;
182         int rc;
183
184         node_id = node->id;
185         priority = node->priority;
186         parent_id = node->parent_id;
187         profile_id = node->shaper_profile_id;
188         lvl = node->lvl;
189         tree = node->tree;
190
191         plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
192                    "parent %u profile 0x%x tree %u",
193                    nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
194                    priority, node->weight, parent_id, profile_id, tree);
195
196         if (tree >= ROC_NIX_TM_TREE_MAX)
197                 return NIX_ERR_PARAM;
198
199         /* Translate sw level id's to nix hw level id's */
200         hw_lvl = nix_tm_lvl2nix(nix, lvl);
201         if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
202                 return NIX_ERR_TM_INVALID_LVL;
203
204         /* Leaf nodes have to be same priority */
205         if (nix_tm_is_leaf(nix, lvl) && priority != 0)
206                 return NIX_ERR_TM_INVALID_PRIO;
207
208         parent_node = nix_tm_node_search(nix, parent_id, tree);
209
210         if (node_id < nix->nb_tx_queues)
211                 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
212         else
213                 exp_next_lvl = hw_lvl + 1;
214
215         /* Check if there is no parent node yet */
216         if (hw_lvl != nix->tm_root_lvl &&
217             (!parent_node || parent_node->hw_lvl != exp_next_lvl))
218                 return NIX_ERR_TM_INVALID_PARENT;
219
220         /* Check if a node already exists */
221         if (nix_tm_node_search(nix, node_id, tree))
222                 return NIX_ERR_TM_NODE_EXISTS;
223
224         /* Check if root node exists */
225         if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
226                 return NIX_ERR_TM_NODE_EXISTS;
227
228         profile = nix_tm_shaper_profile_search(nix, profile_id);
229         if (!nix_tm_is_leaf(nix, lvl)) {
230                 /* Check if shaper profile exists for non leaf node */
231                 if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
232                         return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
233
234                 /* Packet mode in profile should match with that of tm node */
235                 if (profile && profile->pkt_mode != node->pkt_mode)
236                         return NIX_ERR_TM_PKT_MODE_MISMATCH;
237         }
238
239         /* Check if there is second DWRR already in siblings or holes in prio */
240         rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
241         if (rc)
242                 return rc;
243
244         if (node->weight > roc_nix_tm_max_sched_wt_get())
245                 return NIX_ERR_TM_WEIGHT_EXCEED;
246
247         /* Maintain minimum weight */
248         if (!node->weight)
249                 node->weight = 1;
250
251         node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
252         node->rr_prio = 0xF;
253         node->max_prio = UINT32_MAX;
254         node->hw_id = NIX_TM_HW_ID_INVALID;
255         node->flags = 0;
256
257         if (profile)
258                 profile->ref_cnt++;
259
260         node->parent = parent_node;
261         if (parent_node)
262                 parent_node->child_realloc = true;
263         node->parent_hw_id = NIX_TM_HW_ID_INVALID;
264
265         TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
266         plt_tm_dbg("Added node %s lvl %u id %u (%p)",
267                    nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
268         return 0;
269 }
270
/* Clear SW XOFF on every ancestor in the path from 'node' to the root
 * so that a subsequent flush can make progress. Nodes that already own
 * HW resources but are disabled get re-enabled on the way up.
 * Returns 0 on success or the mbox error code.
 */
int
nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *p;
	int rc;

	/* Enable nodes in path for flush to succeed */
	if (!nix_tm_is_leaf(nix, node->lvl))
		p = node;
	else
		p = node->parent;
	while (p) {
		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
		    (p->flags & NIX_TM_NODE_HWRES)) {
			/* false => clear SW XOFF on this node */
			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
			req->lvl = p->hw_lvl;
			req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
							    req->regval);
			rc = mbox_process(mbox);
			if (rc)
				return rc;

			p->flags |= NIX_TM_NODE_ENABLED;
		}
		p = p->parent;
	}

	return 0;
}
302
/* Assert or deassert XOFF/FLUSH on the SMQ backing 'node'.
 * The path to the root is un-XOFFed first so a flush can drain.
 * Returns 0 on success or a negative error code.
 */
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	uint16_t smq;
	int rc;

	smq = node->hw_id;
	plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
		   enable ? "enable" : "disable");

	rc = nix_tm_clear_path_xoff(nix, node);
	if (rc)
		return rc;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	/* Bits 49/50 of NIX_AF_SMQX_CFG control the flush/XOFF state;
	 * NOTE(review): exact bit-to-field mapping assumed from usage —
	 * confirm against the NIX hardware reference manual.
	 */
	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
	/* On disable, only bit 50 is touched (mask keeps bit 49 as-is) */
	req->regval_mask[0] =
		enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

	return mbox_process(mbox);
}
330
/* Fetch scheduling data for the leaf (SQ) node 'sq': its parent SMQ id
 * and the RR quantum derived from the node weight. Also clears SMQ
 * XOFF and marks the leaf enabled. Returns 0 on success, -EIO when the
 * leaf or its parent SMQ is not valid, or the xoff error code.
 */
int
nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
		     uint16_t *smq)
{
	struct nix_tm_node *node;
	int rc;

	node = nix_tm_node_search(nix, sq, nix->tm_tree);

	/* Check if we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
		return -EIO;
	}

	/* Get SMQ Id of leaf node's parent */
	*smq = node->parent->hw_id;
	*rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	/* Ensure SMQ is not held in XOFF before enabling the leaf */
	rc = nix_tm_smq_xoff(nix, node->parent, false);
	if (rc)
		return rc;
	node->flags |= NIX_TM_NODE_ENABLED;
	return 0;
}
356
/* Busy-wait until the given SQ reaches a quiescent state (at most one
 * SQB in use, head == tail, and all SQB buffers returned to the aura).
 * The timeout is scaled from the worst-case drain time at the minimum
 * shaper rate across all TX queues. Returns 0 when drained, -EFAULT on
 * timeout (after dumping TM and queue context for debug).
 */
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint64_t wdata, val, prev;
	uint16_t qid = sq->qid;
	int64_t *regaddr;
	uint64_t timeout; /* 10's of usec */

	/* Wait for enough time based on shaper min rate */
	timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
	/* Wait for worst case scenario of this SQ being last priority
	 * and so have to wait for all other SQ's drain out by their own.
	 */
	timeout = timeout * nix->nb_tx_queues;
	timeout = timeout / nix->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	/* Read NIX_LF_SQ_OP_STATUS for this qid via an LMT atomic op;
	 * the qid is encoded in bits 63:32 of the write data.
	 */
	wdata = ((uint64_t)qid << 32);
	regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(wdata, regaddr);

	/* Spin multiple iterations as "sq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled
	 */

	while (true) {
		prev = val;
		plt_delay_us(10);
		val = roc_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		/* Require two identical consecutive reads before decoding */
		if (prev != val)
			continue;

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
			break;
		}

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	roc_nix_tm_dump(sq->roc_nix);
	roc_nix_queues_ctx_dump(sq->roc_nix);
	return -EFAULT;
}
418
419 /* Flush and disable tx queue and its parent SMQ */
420 int
421 nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
422 {
423         struct roc_nix *roc_nix = sq->roc_nix;
424         struct nix_tm_node *node, *sibling;
425         struct nix_tm_node_list *list;
426         enum roc_nix_tm_tree tree;
427         struct mbox *mbox;
428         struct nix *nix;
429         uint16_t qid;
430         int rc;
431
432         nix = roc_nix_to_nix_priv(roc_nix);
433
434         /* Need not do anything if tree is in disabled state */
435         if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
436                 return 0;
437
438         mbox = (&nix->dev)->mbox;
439         qid = sq->qid;
440
441         tree = nix->tm_tree;
442         list = nix_tm_node_list(nix, tree);
443
444         /* Find the node for this SQ */
445         node = nix_tm_node_search(nix, qid, tree);
446         if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
447                 plt_err("Invalid node/state for sq %u", qid);
448                 return -EFAULT;
449         }
450
451         /* Enable CGX RXTX to drain pkts */
452         if (!roc_nix->io_enabled) {
453                 /* Though it enables both RX MCAM Entries and CGX Link
454                  * we assume all the rx queues are stopped way back.
455                  */
456                 mbox_alloc_msg_nix_lf_start_rx(mbox);
457                 rc = mbox_process(mbox);
458                 if (rc) {
459                         plt_err("cgx start failed, rc=%d", rc);
460                         return rc;
461                 }
462         }
463
464         /* Disable smq xoff for case it was enabled earlier */
465         rc = nix_tm_smq_xoff(nix, node->parent, false);
466         if (rc) {
467                 plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
468                         rc);
469                 return rc;
470         }
471
472         /* As per HRM, to disable an SQ, all other SQ's
473          * that feed to same SMQ must be paused before SMQ flush.
474          */
475         TAILQ_FOREACH(sibling, list, node) {
476                 if (sibling->parent != node->parent)
477                         continue;
478                 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
479                         continue;
480
481                 qid = sibling->id;
482                 sq = nix->sqs[qid];
483                 if (!sq)
484                         continue;
485
486                 rc = roc_nix_tm_sq_aura_fc(sq, false);
487                 if (rc) {
488                         plt_err("Failed to disable sqb aura fc, rc=%d", rc);
489                         goto cleanup;
490                 }
491
492                 /* Wait for sq entries to be flushed */
493                 rc = roc_nix_tm_sq_flush_spin(sq);
494                 if (rc) {
495                         plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
496                         return rc;
497                 }
498         }
499
500         node->flags &= ~NIX_TM_NODE_ENABLED;
501
502         /* Disable and flush */
503         rc = nix_tm_smq_xoff(nix, node->parent, true);
504         if (rc) {
505                 plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
506                         rc);
507                 goto cleanup;
508         }
509 cleanup:
510         /* Restore cgx state */
511         if (!roc_nix->io_enabled) {
512                 mbox_alloc_msg_nix_lf_stop_rx(mbox);
513                 rc |= mbox_process(mbox);
514         }
515
516         return rc;
517 }
518
/* Undo the pause applied by nix_tm_sq_flush_pre() on sibling SQs: once
 * any enabled sibling with a live SQ is found, clear SMQ XOFF (done at
 * most once) and re-enable SQB aura flow control on each sibling.
 * Returns 0 on success or a negative error code.
 */
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct roc_nix_sq *s_sq;
	bool once = false;
	uint16_t qid, s_qid;
	struct nix *nix;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Need not do anything if tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	qid = sq->qid;
	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node) {
		plt_err("Invalid node for sq %u", qid);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;

		/* Skip the SQ being flushed itself */
		if (sibling->id == qid)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_qid = sibling->id;
		s_sq = nix->sqs[s_qid];
		if (!s_sq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_tm_smq_xoff(nix, node->parent, false);
			if (rc) {
				plt_err("Failed to enable smq %u, rc=%d",
					node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = roc_nix_tm_sq_aura_fc(s_sq, true);
		if (rc) {
			plt_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	return 0;
}
585
/* Write the SQ context fields that bind an SQ to its parent SMQ and set
 * its round-robin quantum/weight, via the AQ (cn9k vs cn10k layouts
 * differ). With 'rr_quantum_only' only the quantum is updated.
 * Returns 0 on success or a negative error code.
 */
int
nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
		     bool rr_quantum_only)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid = node->id, smq;
	uint64_t rr_quantum;
	int rc;

	smq = node->parent->hw_id;
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	if (rr_quantum_only)
		plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
			   rr_quantum);
	else
		plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
			   qid, smq, rr_quantum);

	/* NOTE(review): valid SQ ids appear to be 0..nb_tx_queues-1, so
	 * '>' lets qid == nb_tx_queues through — confirm whether '>='
	 * was intended.
	 */
	if (qid > nix->nb_tx_queues)
		return -EFAULT;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* smq update only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		aq->sq.smq_rr_quantum = rr_quantum;
		aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* smq update only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		/* cn10k names this field smq_rr_weight */
		aq->sq.smq_rr_weight = rr_quantum;
		aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
	}

	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to set smq, rc=%d", rc);
	return rc;
}
645
/* Return tx scheduler queues at 'hw_lvl' back to the AF, one mbox free
 * request per schq. With 'above_thresh' set, only queues above the
 * reserved threshold are released; otherwise everything is released.
 * 'contig' selects the contiguous vs discontiguous bitmap/threshold.
 * Returns 0 on success, -ENOSPC on mbox alloc failure, -EFAULT on
 * bitmap/count inconsistency, or the mbox error.
 */
int
nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
			 bool above_thresh)
{
	uint16_t avail, thresh, to_free = 0, schq;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int rc = -ENOSPC;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	thresh =
		contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
	plt_bitmap_scan_init(bmp);

	avail = nix_tm_resource_avail(nix, hw_lvl, contig);

	if (above_thresh) {
		/* Release only above threshold */
		if (avail > thresh)
			to_free = avail - thresh;
	} else {
		/* Release everything */
		to_free = avail;
	}

	/* Now release resources to AF */
	while (to_free) {
		/* Fetch the next 64-bit slab of set bits when exhausted */
		if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
			break;

		/* Lowest set bit in slab + scan position = schq number */
		schq = bitmap_ctzll(slab);
		slab &= ~(1ULL << schq);
		schq += pos;

		/* Free to AF */
		req = mbox_alloc_msg_nix_txsch_free(mbox);
		if (req == NULL)
			return rc;
		req->flags = 0;
		req->schq_lvl = hw_lvl;
		req->schq = schq;
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("failed to release hwres %s(%u) rc %d",
				nix_tm_hwlvl2str(hw_lvl), schq, rc);
			return rc;
		}

		plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
			   schq);
		plt_bitmap_clear(bmp, schq);
		to_free--;
	}

	/* Bitmap ran out before the computed count — bookkeeping bug */
	if (to_free) {
		plt_err("resource inconsistency for %s(%u)",
			nix_tm_hwlvl2str(hw_lvl), contig);
		return -EFAULT;
	}
	return 0;
}
710
/* Release the HW scheduler queue owned by 'node'. If the discontiguous
 * pool at this level is below its reserved count, the schq is recycled
 * into the local bitmap instead of being freed to the AF. On AF free,
 * the parent is marked dirty so its children get reallocated.
 * Returns 0 on success, -ENOSPC on mbox alloc failure, or mbox error.
 */
int
nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint16_t avail, hw_id;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = node->hw_lvl;
	hw_id = node->hw_id;
	bmp = nix->schq_bmp[hw_lvl];
	/* Free specific HW resource */
	plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
		   node);

	avail = nix_tm_resource_avail(nix, hw_lvl, false);
	/* Always for now free to discontiguous queue when avail
	 * is not sufficient.
	 */
	if (nix->discontig_rsvd[hw_lvl] &&
	    avail < nix->discontig_rsvd[hw_lvl]) {
		PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
		PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
		plt_bitmap_set(bmp, hw_id);
		node->hw_id = NIX_TM_HW_ID_INVALID;
		node->flags &= ~NIX_TM_NODE_HWRES;
		return 0;
	}

	/* Free to AF */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return rc;
	req->flags = 0;
	req->schq_lvl = node->hw_lvl;
	req->schq = hw_id;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("failed to release hwres %s(%u) rc %d",
			nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
		return rc;
	}

	/* Mark parent as dirty for reallocing it's children */
	if (node->parent)
		node->parent->child_realloc = true;

	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags &= ~NIX_TM_NODE_HWRES;
	plt_tm_dbg("Released hwres %s(%u) to af",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id);
	return 0;
}
767
/* Remove node 'node_id' from the given tree: rejects nodes that still
 * have children, releases its HW resource if present, drops the shaper
 * profile reference and unlinks it from the list. The node memory is
 * freed only when 'free' is set (caller may own the allocation).
 * Returns 0 on success or a NIX_ERR_*/negative error code.
 */
int
nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
		   enum roc_nix_tm_tree tree, bool free)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_node *node, *child;
	struct nix_tm_node_list *list;
	uint32_t profile_id;
	int rc;

	plt_tm_dbg("Delete node id %u tree %u", node_id, tree);

	node = nix_tm_node_search(nix, node_id, tree);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	list = nix_tm_node_list(nix, tree);
	/* Check for any existing children */
	TAILQ_FOREACH(child, list, node) {
		if (child->parent == node)
			return NIX_ERR_TM_CHILD_EXISTS;
	}

	/* Remove shaper profile reference */
	profile_id = node->shaper_profile_id;
	profile = nix_tm_shaper_profile_search(nix, profile_id);

	/* Free hw resource locally */
	if (node->flags & NIX_TM_NODE_HWRES) {
		rc = nix_tm_free_node_resource(nix, node);
		if (rc)
			return rc;
	}

	if (profile)
		profile->ref_cnt--;

	TAILQ_REMOVE(list, node, node);

	plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
		   node->priority, node->weight,
		   node->parent ? node->parent->id : UINT32_MAX,
		   node->shaper_profile_id, tree, node);
	/* Free only if requested */
	if (free)
		nix_tm_node_free(node);
	return 0;
}
819
/* Assign HW scheduler queue ids to all children of 'parent'.
 * Strict-priority children take contiguous ids ('contig_id' base +
 * priority); round-robin children (priority == parent->rr_prio) take
 * discontiguous ids from the level bitmap, first consuming the spare
 * contiguous slot left at the RR priority when SP and RR are mixed.
 * Returns 0 on success, -EFAULT if a child already owns a resource,
 * -ENOENT when the bitmap runs out.
 */
static int
nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
		    uint16_t *contig_id, int *contig_cnt,
		    struct nix_tm_node_list *list)
{
	struct nix_tm_node *child;
	struct plt_bitmap *bmp;
	uint8_t child_hw_lvl;
	int spare_schq = -1;
	uint32_t pos = 0;
	uint64_t slab;
	uint16_t schq;

	child_hw_lvl = parent->hw_lvl - 1;
	bmp = nix->schq_bmp[child_hw_lvl];
	plt_bitmap_scan_init(bmp);
	slab = 0;

	/* Save spare schq if it is case of RR + SP */
	if (parent->rr_prio != 0xf && *contig_cnt > 1)
		spare_schq = *contig_id + parent->rr_prio;

	TAILQ_FOREACH(child, list, node) {
		if (!child->parent)
			continue;
		if (child->parent->id != parent->id)
			continue;

		/* Resource never expected to be present */
		if (child->flags & NIX_TM_NODE_HWRES) {
			plt_err("Resource exists for child (%s)%u, id %u (%p)",
				nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
				child->id, child);
			return -EFAULT;
		}

		if (!slab)
			plt_bitmap_scan(bmp, &pos, &slab);

		if (child->priority == parent->rr_prio && spare_schq != -1) {
			/* Use spare schq first if present */
			schq = spare_schq;
			spare_schq = -1;
			*contig_cnt = *contig_cnt - 1;

		} else if (child->priority == parent->rr_prio) {
			/* Assign a discontiguous queue */
			if (!slab) {
				plt_err("Schq not found for Child %u "
					"lvl %u (%p)",
					child->id, child->lvl, child);
				return -ENOENT;
			}

			schq = bitmap_ctzll(slab);
			slab &= ~(1ULL << schq);
			schq += pos;
			plt_bitmap_clear(bmp, schq);
		} else {
			/* Assign a contiguous queue */
			schq = *contig_id + child->priority;
			*contig_cnt = *contig_cnt - 1;
		}

		plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
			   nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
			   child->id, child);

		child->hw_id = schq;
		child->parent_hw_id = parent->hw_id;
		child->flags |= NIX_TM_NODE_HWRES;
	}

	return 0;
}
895
/* Assign HW txschq ids to every node of the given tree.
 *
 * Walks parents top-down (TL1 -> TL4). For each "dirty" parent
 * (child_realloc set), a run of contiguous schq indices is carved out of
 * the per-level contiguous bitmap for its SP children, then
 * nix_tm_assign_hw_id() distributes contiguous/discontiguous ids to the
 * children. Finally the root node (and, for VFs without TL1 access, a
 * TL1 id for root's parent) is pulled from the discontiguous bitmaps.
 *
 * @param nix	Private nix struct with schq bitmaps already populated
 *		(see nix_tm_copy_rsp_to_nix()).
 * @param tree	Which software tree's node list to process.
 *
 * @return 0 on success, -EFAULT on inconsistent state, -ENOENT when the
 *	   contiguous bitmap cannot satisfy a parent's priority span,
 *	   -EIO when root resources were never allocated.
 */
int
nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *parent, *root = NULL;
	struct plt_bitmap *bmp, *bmp_contig;
	struct nix_tm_node_list *list;
	uint8_t child_hw_lvl, hw_lvl;
	uint16_t contig_id, j;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int cnt, rc;

	list = nix_tm_node_list(nix, tree);
	/* Walk from TL1 to TL4 parents */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			/* Children live one HW level below the parent */
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != hw_lvl)
				continue;

			/* Remember root for future */
			if (parent->hw_lvl == nix->tm_root_lvl)
				root = parent;

			if (!parent->child_realloc) {
				/* Skip when parent is not dirty */
				if (nix_tm_child_res_valid(list, parent))
					continue;
				/* Not dirty yet children lack HW ids:
				 * internal bookkeeping went wrong.
				 */
				plt_err("Parent not dirty but invalid "
					"child res parent id %u(lvl %u)",
					parent->id, parent->lvl);
				return -EFAULT;
			}

			bmp_contig = nix->schq_contig_bmp[child_hw_lvl];

			/* Prealloc contiguous indices for a parent */
			contig_id = NIX_TM_MAX_HW_TXSCHQ;
			/* One schq per priority level under this parent */
			cnt = (int)parent->max_prio + 1;
			if (cnt > 0) {
				plt_bitmap_scan_init(bmp_contig);
				if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
					plt_err("Contig schq not found");
					return -ENOENT;
				}
				/* First set bit in the found slab is the
				 * start of the candidate contiguous run.
				 */
				contig_id = pos + bitmap_ctzll(slab);

				/* Check if we have enough */
				for (j = contig_id; j < contig_id + cnt; j++) {
					if (!plt_bitmap_get(bmp_contig, j))
						break;
				}

				if (j != contig_id + cnt) {
					plt_err("Contig schq not sufficient");
					return -ENOENT;
				}

				/* Claim the whole run before handing out */
				for (j = contig_id; j < contig_id + cnt; j++)
					plt_bitmap_clear(bmp_contig, j);
			}

			/* Assign hw id to all children; on return cnt must
			 * have been consumed down to zero.
			 */
			rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
						 list);
			if (cnt || rc) {
				plt_err("Unexpected err, contig res alloc, "
					"parent %u, of %s, rc=%d, cnt=%d",
					parent->id, nix_tm_hwlvl2str(hw_lvl),
					rc, cnt);
				return -EFAULT;
			}

			/* Clear the dirty bit as children's
			 * resources are reallocated.
			 */
			parent->child_realloc = false;
		}
	}

	/* Root is always expected to be there */
	if (!root)
		return -EFAULT;

	/* Root already holds a HW resource - nothing more to do */
	if (root->flags & NIX_TM_NODE_HWRES)
		return 0;

	/* Process root node */
	bmp = nix->schq_bmp[nix->tm_root_lvl];
	plt_bitmap_scan_init(bmp);
	if (!plt_bitmap_scan(bmp, &pos, &slab)) {
		plt_err("Resource not allocated for root");
		return -EIO;
	}

	root->hw_id = pos + bitmap_ctzll(slab);
	root->flags |= NIX_TM_NODE_HWRES;
	plt_bitmap_clear(bmp, root->hw_id);

	/* Get TL1 id as well when root is not TL1 */
	if (!nix_tm_have_tl1_access(nix)) {
		bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];

		plt_bitmap_scan_init(bmp);
		if (!plt_bitmap_scan(bmp, &pos, &slab)) {
			plt_err("Resource not found for TL1");
			return -EIO;
		}
		root->parent_hw_id = pos + bitmap_ctzll(slab);
		plt_bitmap_clear(bmp, root->parent_hw_id);
	}

	plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
		   nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);

	return 0;
}
1013
1014 void
1015 nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
1016 {
1017         uint8_t lvl;
1018         uint16_t i;
1019
1020         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1021                 for (i = 0; i < rsp->schq[lvl]; i++)
1022                         plt_bitmap_set(nix->schq_bmp[lvl],
1023                                        rsp->schq_list[lvl][i]);
1024
1025                 for (i = 0; i < rsp->schq_contig[lvl]; i++)
1026                         plt_bitmap_set(nix->schq_contig_bmp[lvl],
1027                                        rsp->schq_contig_list[lvl][i]);
1028         }
1029 }
1030
/* Allocate HW txschq resources for the given tree via AF mailbox.
 *
 * First estimates the per-level contiguous/discontiguous demand, then
 * requests it from AF. Since a single mbox request is capped at
 * MAX_TXSCHQ_PER_FUNC ids per level, the demand is split across multiple
 * requests until fully satisfied. Responses are folded into the nix
 * bitmaps as they arrive.
 *
 * @param nix	Private nix struct.
 * @param tree	Tree whose resource demand is to be satisfied.
 *
 * @return 0 on success (including "nothing needed"), -ENOMEM when a mbox
 *	   message cannot be allocated, or the mbox processing error code.
 *	   On error, any resources acquired so far are released.
 */
int
nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
{
	uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t schq[NIX_TXSCH_LVL_CNT];
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl, i;
	bool pend;
	int rc;

	memset(schq, 0, sizeof(schq));
	memset(schq_contig, 0, sizeof(schq_contig));

	/* Estimate requirement */
	rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
	if (!rc)
		return 0;

	/* Release existing contiguous resources when realloc requested
	 * as there is no way to guarantee continuity of old with new.
	 */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		if (schq_contig[hw_lvl])
			nix_tm_release_resources(nix, hw_lvl, true, false);
	}

	/* Alloc as needed */
	do {
		pend = false;
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (!req) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		/* Start by asking for the full remaining demand ... */
		mbox_memcpy(req->schq, schq, sizeof(req->schq));
		mbox_memcpy(req->schq_contig, schq_contig,
			    sizeof(req->schq_contig));

		/* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
		 * So split alloc to multiple requests.
		 */
		for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
			if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq[i] = MAX_TXSCHQ_PER_FUNC;
			/* Track what is still outstanding after this req */
			schq[i] -= req->schq[i];

			if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
			schq_contig[i] -= req->schq_contig[i];

			/* Another round is needed for the remainder */
			if (schq[i] || schq_contig[i])
				pend = true;
		}

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto alloc_err;

		/* Fold granted ids into the per-level bitmaps */
		nix_tm_copy_rsp_to_nix(nix, rsp);
	} while (pend);

	/* rsp from the last successful response; link_cfg_lvl is the
	 * same in every response from AF.
	 */
	nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
	return 0;
alloc_err:
	/* Undo partial allocations: free both contiguous and
	 * discontiguous ids acquired in earlier iterations.
	 */
	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		if (nix_tm_release_resources(nix, i, true, false))
			plt_err("Failed to release contig resources of "
				"lvl %d on error",
				i);
		if (nix_tm_release_resources(nix, i, false, false))
			plt_err("Failed to release discontig resources of "
				"lvl %d on error",
				i);
	}
	return rc;
}
1109
1110 int
1111 nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
1112 {
1113         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1114         uint32_t nonleaf_id = nix->nb_tx_queues;
1115         struct nix_tm_node *node = NULL;
1116         uint8_t leaf_lvl, lvl, lvl_end;
1117         uint32_t parent, i;
1118         int rc = 0;
1119
1120         /* Add ROOT, SCH1, SCH2, SCH3, [SCH4]  nodes */
1121         parent = ROC_NIX_TM_NODE_ID_INVALID;
1122         /* With TL1 access we have an extra level */
1123         lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
1124                                                        ROC_TM_LVL_SCH3);
1125
1126         for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1127                 rc = -ENOMEM;
1128                 node = nix_tm_node_alloc();
1129                 if (!node)
1130                         goto error;
1131
1132                 node->id = nonleaf_id;
1133                 node->parent_id = parent;
1134                 node->priority = 0;
1135                 node->weight = NIX_TM_DFLT_RR_WT;
1136                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1137                 node->lvl = lvl;
1138                 node->tree = ROC_NIX_TM_DEFAULT;
1139
1140                 rc = nix_tm_node_add(roc_nix, node);
1141                 if (rc)
1142                         goto error;
1143                 parent = nonleaf_id;
1144                 nonleaf_id++;
1145         }
1146
1147         parent = nonleaf_id - 1;
1148         leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1149                                                         ROC_TM_LVL_SCH4);
1150
1151         /* Add leaf nodes */
1152         for (i = 0; i < nix->nb_tx_queues; i++) {
1153                 rc = -ENOMEM;
1154                 node = nix_tm_node_alloc();
1155                 if (!node)
1156                         goto error;
1157
1158                 node->id = i;
1159                 node->parent_id = parent;
1160                 node->priority = 0;
1161                 node->weight = NIX_TM_DFLT_RR_WT;
1162                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1163                 node->lvl = leaf_lvl;
1164                 node->tree = ROC_NIX_TM_DEFAULT;
1165
1166                 rc = nix_tm_node_add(roc_nix, node);
1167                 if (rc)
1168                         goto error;
1169         }
1170
1171         return 0;
1172 error:
1173         nix_tm_node_free(node);
1174         return rc;
1175 }
1176
1177 int
1178 roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
1179 {
1180         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1181         uint32_t nonleaf_id = nix->nb_tx_queues;
1182         struct nix_tm_node *node = NULL;
1183         uint8_t leaf_lvl, lvl, lvl_end;
1184         uint32_t parent, i;
1185         int rc = 0;
1186
1187         /* Add ROOT, SCH1, SCH2 nodes */
1188         parent = ROC_NIX_TM_NODE_ID_INVALID;
1189         lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
1190                                                        ROC_TM_LVL_SCH2);
1191
1192         for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1193                 rc = -ENOMEM;
1194                 node = nix_tm_node_alloc();
1195                 if (!node)
1196                         goto error;
1197
1198                 node->id = nonleaf_id;
1199                 node->parent_id = parent;
1200                 node->priority = 0;
1201                 node->weight = NIX_TM_DFLT_RR_WT;
1202                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1203                 node->lvl = lvl;
1204                 node->tree = ROC_NIX_TM_RLIMIT;
1205
1206                 rc = nix_tm_node_add(roc_nix, node);
1207                 if (rc)
1208                         goto error;
1209                 parent = nonleaf_id;
1210                 nonleaf_id++;
1211         }
1212
1213         /* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
1214         lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);
1215
1216         /* Add per queue SMQ nodes i.e SCH4 / SCH3 */
1217         for (i = 0; i < nix->nb_tx_queues; i++) {
1218                 rc = -ENOMEM;
1219                 node = nix_tm_node_alloc();
1220                 if (!node)
1221                         goto error;
1222
1223                 node->id = nonleaf_id + i;
1224                 node->parent_id = parent;
1225                 node->priority = 0;
1226                 node->weight = NIX_TM_DFLT_RR_WT;
1227                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1228                 node->lvl = lvl;
1229                 node->tree = ROC_NIX_TM_RLIMIT;
1230
1231                 rc = nix_tm_node_add(roc_nix, node);
1232                 if (rc)
1233                         goto error;
1234         }
1235
1236         parent = nonleaf_id;
1237         leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1238                                                         ROC_TM_LVL_SCH4);
1239
1240         /* Add leaf nodes */
1241         for (i = 0; i < nix->nb_tx_queues; i++) {
1242                 rc = -ENOMEM;
1243                 node = nix_tm_node_alloc();
1244                 if (!node)
1245                         goto error;
1246
1247                 node->id = i;
1248                 node->parent_id = parent + i;
1249                 node->priority = 0;
1250                 node->weight = NIX_TM_DFLT_RR_WT;
1251                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1252                 node->lvl = leaf_lvl;
1253                 node->tree = ROC_NIX_TM_RLIMIT;
1254
1255                 rc = nix_tm_node_add(roc_nix, node);
1256                 if (rc)
1257                         goto error;
1258         }
1259
1260         return 0;
1261 error:
1262         nix_tm_node_free(node);
1263         return rc;
1264 }
1265
1266 int
1267 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
1268 {
1269         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1270         struct nix_tm_shaper_profile *profile;
1271         struct nix_tm_node *node, *next_node;
1272         struct nix_tm_node_list *list;
1273         enum roc_nix_tm_tree tree;
1274         uint32_t profile_id;
1275         int rc = 0;
1276
1277         for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
1278                 if (!(tree_mask & BIT(tree)))
1279                         continue;
1280
1281                 plt_tm_dbg("Freeing resources of tree %u", tree);
1282
1283                 list = nix_tm_node_list(nix, tree);
1284                 next_node = TAILQ_FIRST(list);
1285                 while (next_node) {
1286                         node = next_node;
1287                         next_node = TAILQ_NEXT(node, node);
1288
1289                         if (!nix_tm_is_leaf(nix, node->lvl) &&
1290                             node->flags & NIX_TM_NODE_HWRES) {
1291                                 /* Clear xoff in path for flush to succeed */
1292                                 rc = nix_tm_clear_path_xoff(nix, node);
1293                                 if (rc)
1294                                         return rc;
1295                                 rc = nix_tm_free_node_resource(nix, node);
1296                                 if (rc)
1297                                         return rc;
1298                         }
1299                 }
1300
1301                 /* Leave software elements if needed */
1302                 if (hw_only)
1303                         continue;
1304
1305                 next_node = TAILQ_FIRST(list);
1306                 while (next_node) {
1307                         node = next_node;
1308                         next_node = TAILQ_NEXT(node, node);
1309
1310                         plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
1311                                    node->id, node);
1312
1313                         profile_id = node->shaper_profile_id;
1314                         profile = nix_tm_shaper_profile_search(nix, profile_id);
1315                         if (profile)
1316                                 profile->ref_cnt--;
1317
1318                         TAILQ_REMOVE(list, node, node);
1319                         nix_tm_node_free(node);
1320                 }
1321         }
1322         return rc;
1323 }
1324
1325 int
1326 nix_tm_conf_init(struct roc_nix *roc_nix)
1327 {
1328         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1329         uint32_t bmp_sz, hw_lvl;
1330         void *bmp_mem;
1331         int rc, i;
1332
1333         PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
1334         PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
1335                           ROC_NIX_TM_SHAPER_PROFILE_SZ);
1336
1337         nix->tm_flags = 0;
1338         for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
1339                 TAILQ_INIT(&nix->trees[i]);
1340
1341         TAILQ_INIT(&nix->shaper_profile_list);
1342         nix->tm_rate_min = 1E9; /* 1Gbps */
1343
1344         rc = -ENOMEM;
1345         bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
1346         bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
1347         if (!bmp_mem)
1348                 return rc;
1349         nix->schq_bmp_mem = bmp_mem;
1350
1351         /* Init contiguous and discontiguous bitmap per lvl */
1352         rc = -EIO;
1353         for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1354                 /* Bitmap for discontiguous resource */
1355                 nix->schq_bmp[hw_lvl] =
1356                         plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1357                 if (!nix->schq_bmp[hw_lvl])
1358                         goto exit;
1359
1360                 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1361
1362                 /* Bitmap for contiguous resource */
1363                 nix->schq_contig_bmp[hw_lvl] =
1364                         plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1365                 if (!nix->schq_contig_bmp[hw_lvl])
1366                         goto exit;
1367
1368                 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1369         }
1370
1371         /* Disable TL1 Static Priority when VF's are enabled
1372          * as otherwise VF's TL2 reallocation will be needed
1373          * runtime to support a specific topology of PF.
1374          */
1375         if (nix->pci_dev->max_vfs)
1376                 nix->tm_flags |= NIX_TM_TL1_NO_SP;
1377
1378         /* TL1 access is only for PF's */
1379         if (roc_nix_is_pf(roc_nix)) {
1380                 nix->tm_flags |= NIX_TM_TL1_ACCESS;
1381                 nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
1382         } else {
1383                 nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
1384         }
1385
1386         return 0;
1387 exit:
1388         nix_tm_conf_fini(roc_nix);
1389         return rc;
1390 }
1391
1392 void
1393 nix_tm_conf_fini(struct roc_nix *roc_nix)
1394 {
1395         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1396         uint16_t hw_lvl;
1397
1398         for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1399                 plt_bitmap_free(nix->schq_bmp[hw_lvl]);
1400                 plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
1401         }
1402         plt_free(nix->schq_bmp_mem);
1403 }