common/cnxk: add NPC parsing API
[dpdk.git] / drivers / common / cnxk / roc_nix_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "roc_api.h"
6 #include "roc_priv.h"
7
/* Index of the least significant set bit in @slab, or 0 when the slab
 * is empty (guards the undefined __builtin_ctzll(0) case).
 */
static inline int
bitmap_ctzll(uint64_t slab)
{
	return (slab == 0) ? 0 : __builtin_ctzll(slab);
}
16
17 void
18 nix_tm_clear_shaper_profiles(struct nix *nix)
19 {
20         struct nix_tm_shaper_profile *shaper_profile;
21
22         shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
23         while (shaper_profile != NULL) {
24                 if (shaper_profile->ref_cnt)
25                         plt_warn("Shaper profile %u has non zero references",
26                                  shaper_profile->id);
27                 TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
28                 nix_tm_shaper_profile_free(shaper_profile);
29                 shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
30         }
31 }
32
33 static int
34 nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
35 {
36         uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
37         uint64_t regval[MAX_REGS_PER_MBOX_MSG];
38         struct nix_tm_shaper_profile *profile;
39         uint64_t reg[MAX_REGS_PER_MBOX_MSG];
40         struct mbox *mbox = (&nix->dev)->mbox;
41         struct nix_txschq_config *req;
42         int rc = -EFAULT;
43         uint32_t hw_lvl;
44         uint8_t k = 0;
45
46         memset(regval, 0, sizeof(regval));
47         memset(regval_mask, 0, sizeof(regval_mask));
48
49         profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
50         hw_lvl = node->hw_lvl;
51
52         /* Need this trigger to configure TL1 */
53         if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
54                 /* Prepare default conf for TL1 */
55                 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
56                 req->lvl = NIX_TXSCH_LVL_TL1;
57
58                 k = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,
59                                             req->regval);
60                 req->num_regs = k;
61                 rc = mbox_process(mbox);
62                 if (rc)
63                         goto error;
64         }
65
66         /* Prepare topology config */
67         k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
68
69         /* Prepare schedule config */
70         k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);
71
72         /* Prepare shaping config */
73         k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);
74
75         if (!k)
76                 return 0;
77
78         /* Copy and send config mbox */
79         req = mbox_alloc_msg_nix_txschq_cfg(mbox);
80         req->lvl = hw_lvl;
81         req->num_regs = k;
82
83         mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
84         mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
85         mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
86
87         rc = mbox_process(mbox);
88         if (rc)
89                 goto error;
90
91         return 0;
92 error:
93         plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
94         return rc;
95 }
96
97 int
98 nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
99 {
100         struct nix_tm_node_list *list;
101         struct nix_tm_node *node;
102         uint32_t hw_lvl;
103         int rc = 0;
104
105         list = nix_tm_node_list(nix, tree);
106
107         for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
108                 TAILQ_FOREACH(node, list, node) {
109                         if (node->hw_lvl != hw_lvl)
110                                 continue;
111                         rc = nix_tm_node_reg_conf(nix, node);
112                         if (rc)
113                                 goto exit;
114                 }
115         }
116 exit:
117         return rc;
118 }
119
120 int
121 nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
122 {
123         struct nix_tm_node *child, *parent;
124         struct nix_tm_node_list *list;
125         uint32_t rr_prio, max_prio;
126         uint32_t rr_num = 0;
127
128         list = nix_tm_node_list(nix, tree);
129
130         /* Release all the node hw resources locally
131          * if parent marked as dirty and resource exists.
132          */
133         TAILQ_FOREACH(child, list, node) {
134                 /* Release resource only if parent direct hierarchy changed */
135                 if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
136                     child->parent->child_realloc) {
137                         nix_tm_free_node_resource(nix, child);
138                 }
139                 child->max_prio = UINT32_MAX;
140         }
141
142         TAILQ_FOREACH(parent, list, node) {
143                 /* Count group of children of same priority i.e are RR */
144                 rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
145                                          &max_prio);
146
147                 /* Assuming that multiple RR groups are
148                  * not configured based on capability.
149                  */
150                 parent->rr_prio = rr_prio;
151                 parent->rr_num = rr_num;
152                 parent->max_prio = max_prio;
153         }
154
155         return 0;
156 }
157
/* Validate @node against the current state of its target tree and
 * append it to that tree's node list. This is software bookkeeping
 * only: no HW resource is allocated here (hw_id stays invalid until
 * resources are assigned). Takes a reference on the node's shaper
 * profile and marks the parent dirty for child reallocation.
 * Returns 0 on success or a NIX_ERR_TM_* error code.
 */
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	uint32_t node_id, parent_id, lvl;
	struct nix_tm_node *parent_node;
	uint32_t priority, profile_id;
	uint8_t hw_lvl, exp_next_lvl;
	enum roc_nix_tm_tree tree;
	int rc;

	node_id = node->id;
	priority = node->priority;
	parent_id = node->parent_id;
	profile_id = node->shaper_profile_id;
	lvl = node->lvl;
	tree = node->tree;

	plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u",
		   nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
		   priority, node->weight, parent_id, profile_id, tree);

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	/* Translate sw level id's to nix hw level id's */
	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
		return NIX_ERR_TM_INVALID_LVL;

	/* Leaf nodes have to be same priority */
	if (nix_tm_is_leaf(nix, lvl) && priority != 0)
		return NIX_ERR_TM_INVALID_PRIO;

	parent_node = nix_tm_node_search(nix, parent_id, tree);

	/* Leaf nodes (node id in SQ range) expect an SMQ parent;
	 * everything else expects the next hw level up.
	 */
	if (node_id < nix->nb_tx_queues)
		exp_next_lvl = NIX_TXSCH_LVL_SMQ;
	else
		exp_next_lvl = hw_lvl + 1;

	/* Check if there is no parent node yet */
	if (hw_lvl != nix->tm_root_lvl &&
	    (!parent_node || parent_node->hw_lvl != exp_next_lvl))
		return NIX_ERR_TM_INVALID_PARENT;

	/* Check if a node already exists */
	if (nix_tm_node_search(nix, node_id, tree))
		return NIX_ERR_TM_NODE_EXISTS;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	if (!nix_tm_is_leaf(nix, lvl)) {
		/* Check if shaper profile exists for non leaf node */
		if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

		/* Packet mode in profile should match with that of tm node */
		if (profile && profile->pkt_mode != node->pkt_mode)
			return NIX_ERR_TM_PKT_MODE_MISMATCH;
	}

	/* Check if there is second DWRR already in siblings or holes in prio */
	rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
	if (rc)
		return rc;

	if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
		return NIX_ERR_TM_WEIGHT_EXCEED;

	/* Maintain minimum weight */
	if (!node->weight)
		node->weight = 1;

	/* Initialize node to a clean, resource-less state */
	node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
	node->rr_prio = 0xF;
	node->max_prio = UINT32_MAX;
	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags = 0;

	if (profile)
		profile->ref_cnt++;

	node->parent = parent_node;
	/* Parent topology changed; its children need HW realloc */
	if (parent_node)
		parent_node->child_realloc = true;
	node->parent_hw_id = NIX_TM_HW_ID_INVALID;

	TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
	plt_tm_dbg("Added node %s lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
	return 0;
}
252
/* Clear SW XOFF on every node along the path from @node (or its
 * parent, when @node is a leaf) up to the root, for nodes that hold a
 * HW resource but are currently disabled — so that a subsequent flush
 * of @node can drain. Returns 0 on success or the mbox error code.
 */
int
nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *p;
	int rc;

	/* Enable nodes in path for flush to succeed */
	if (!nix_tm_is_leaf(nix, node->lvl))
		p = node;
	else
		p = node->parent;
	while (p) {
		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
		    (p->flags & NIX_TM_NODE_HWRES)) {
			/* NOTE(review): mbox_alloc_msg_nix_txschq_cfg()
			 * result is used unchecked here — confirm it
			 * cannot return NULL, or add a check.
			 */
			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
			req->lvl = p->hw_lvl;
			/* false => regs that clear (not set) SW XOFF */
			req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
							    req->regval);
			rc = mbox_process(mbox);
			if (rc)
				return rc;

			p->flags |= NIX_TM_NODE_ENABLED;
		}
		p = p->parent;
	}

	return 0;
}
284
/* Set (@enable true) or clear (@enable false) the XOFF/FLUSH state of
 * the SMQ backing @node. SW XOFF along the path to the root is cleared
 * first so a flush can actually drain. Returns mbox status.
 */
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	uint16_t smq;
	int rc;

	smq = node->hw_id;
	plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
		   enable ? "enable" : "disable");

	rc = nix_tm_clear_path_xoff(nix, node);
	if (rc)
		return rc;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	/* Bits 49/50 of NIX_AF_SMQ(x)_CFG — presumably the flush and
	 * XOFF controls per the HRM; TODO confirm exact bit names.
	 * The disable path writes 0 with a mask clearing only bit 50.
	 */
	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
	req->regval_mask[0] =
		enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

	return mbox_process(mbox);
}
312
313 int
314 nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
315                      uint16_t *smq)
316 {
317         struct nix_tm_node *node;
318         int rc;
319
320         node = nix_tm_node_search(nix, sq, nix->tm_tree);
321
322         /* Check if we found a valid leaf node */
323         if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
324             node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
325                 return -EIO;
326         }
327
328         /* Get SMQ Id of leaf node's parent */
329         *smq = node->parent->hw_id;
330         *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
331
332         rc = nix_tm_smq_xoff(nix, node->parent, false);
333         if (rc)
334                 return rc;
335         node->flags |= NIX_TM_NODE_ENABLED;
336         return 0;
337 }
338
/* Spin until the SQ reaches a quiescent state (at most one SQB in
 * use, head == tail, and the flow-control word reports all SQB
 * buffers returned), or until a timeout derived from the shaper min
 * rate expires. Returns 0 when drained, -EFAULT on timeout (after
 * dumping TM and queue context state for diagnosis).
 */
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint64_t wdata, val, prev;
	uint16_t qid = sq->qid;
	int64_t *regaddr;
	uint64_t timeout; /* 10's of usec */

	/* Wait for enough time based on shaper min rate */
	timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
	/* Wait for worst case scenario of this SQ being last priority
	 * and so have to wait for all other SQ's drain out by their own.
	 */
	timeout = timeout * nix->nb_tx_queues;
	timeout = timeout / nix->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	/* Atomic add with the qid in the upper word reads back the
	 * SQ operation status word from NIX_LF_SQ_OP_STATUS.
	 */
	wdata = ((uint64_t)qid << 32);
	regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(wdata, regaddr);

	/* Spin multiple iterations as "sq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled
	 */

	while (true) {
		prev = val;
		plt_delay_us(10);
		val = roc_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		/* Require two consecutive identical reads before
		 * trusting the counters below.
		 */
		if (prev != val)
			continue;

		/* Unpack status word — field positions presumably per
		 * the NIX_LF_SQ_OP_STATUS layout; TODO confirm in HRM.
		 */
		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
			break;
		}

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	roc_nix_tm_dump(sq->roc_nix);
	roc_nix_queues_ctx_dump(sq->roc_nix);
	return -EFAULT;
}
400
401 /* Flush and disable tx queue and its parent SMQ */
402 int
403 nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
404 {
405         struct roc_nix *roc_nix = sq->roc_nix;
406         struct nix_tm_node *node, *sibling;
407         struct nix_tm_node_list *list;
408         enum roc_nix_tm_tree tree;
409         struct mbox *mbox;
410         struct nix *nix;
411         uint16_t qid;
412         int rc;
413
414         nix = roc_nix_to_nix_priv(roc_nix);
415
416         /* Need not do anything if tree is in disabled state */
417         if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
418                 return 0;
419
420         mbox = (&nix->dev)->mbox;
421         qid = sq->qid;
422
423         tree = nix->tm_tree;
424         list = nix_tm_node_list(nix, tree);
425
426         /* Find the node for this SQ */
427         node = nix_tm_node_search(nix, qid, tree);
428         if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
429                 plt_err("Invalid node/state for sq %u", qid);
430                 return -EFAULT;
431         }
432
433         /* Enable CGX RXTX to drain pkts */
434         if (!roc_nix->io_enabled) {
435                 /* Though it enables both RX MCAM Entries and CGX Link
436                  * we assume all the rx queues are stopped way back.
437                  */
438                 mbox_alloc_msg_nix_lf_start_rx(mbox);
439                 rc = mbox_process(mbox);
440                 if (rc) {
441                         plt_err("cgx start failed, rc=%d", rc);
442                         return rc;
443                 }
444         }
445
446         /* Disable smq xoff for case it was enabled earlier */
447         rc = nix_tm_smq_xoff(nix, node->parent, false);
448         if (rc) {
449                 plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
450                         rc);
451                 return rc;
452         }
453
454         /* As per HRM, to disable an SQ, all other SQ's
455          * that feed to same SMQ must be paused before SMQ flush.
456          */
457         TAILQ_FOREACH(sibling, list, node) {
458                 if (sibling->parent != node->parent)
459                         continue;
460                 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
461                         continue;
462
463                 qid = sibling->id;
464                 sq = nix->sqs[qid];
465                 if (!sq)
466                         continue;
467
468                 rc = roc_nix_tm_sq_aura_fc(sq, false);
469                 if (rc) {
470                         plt_err("Failed to disable sqb aura fc, rc=%d", rc);
471                         goto cleanup;
472                 }
473
474                 /* Wait for sq entries to be flushed */
475                 rc = roc_nix_tm_sq_flush_spin(sq);
476                 if (rc) {
477                         plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
478                         return rc;
479                 }
480         }
481
482         node->flags &= ~NIX_TM_NODE_ENABLED;
483
484         /* Disable and flush */
485         rc = nix_tm_smq_xoff(nix, node->parent, true);
486         if (rc) {
487                 plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
488                         rc);
489                 goto cleanup;
490         }
491 cleanup:
492         /* Restore cgx state */
493         if (!roc_nix->io_enabled) {
494                 mbox_alloc_msg_nix_lf_stop_rx(mbox);
495                 rc |= mbox_process(mbox);
496         }
497
498         return rc;
499 }
500
/* Undo the sibling pause done by nix_tm_sq_flush_pre(): if any
 * enabled sibling SQ sharing the flushed SQ's parent SMQ remains,
 * clear the SMQ XOFF once and re-enable SQB aura flow control on each
 * sibling. Returns 0 on success or a negative error code.
 */
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct roc_nix_sq *s_sq;
	bool once = false;
	uint16_t qid, s_qid;
	struct nix *nix;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Need not do anything if tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	qid = sq->qid;
	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node) {
		plt_err("Invalid node for sq %u", qid);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;

		/* Skip the SQ that was just flushed */
		if (sibling->id == qid)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_qid = sibling->id;
		s_sq = nix->sqs[s_qid];
		if (!s_sq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_tm_smq_xoff(nix, node->parent, false);
			if (rc) {
				plt_err("Failed to enable smq %u, rc=%d",
					node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = roc_nix_tm_sq_aura_fc(s_sq, true);
		if (rc) {
			plt_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	return 0;
}
567
/* Program an SQ's parent SMQ binding and/or RR quantum through the
 * NIX admin queue (separate request layouts for cn9k and cn10k).
 * @rr_quantum_only true updates only the quantum/weight field.
 * Returns 0 on success, -EFAULT for a bad qid, or the mbox error.
 */
int
nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
		     bool rr_quantum_only)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid = node->id, smq;
	uint64_t rr_quantum;
	int rc;

	smq = node->parent->hw_id;
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	if (rr_quantum_only)
		plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
			   rr_quantum);
	else
		plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
			   qid, smq, rr_quantum);

	/* NOTE(review): looks like an off-by-one — a qid equal to
	 * nb_tx_queues passes this check; presumably it should be >=.
	 * Confirm the valid qid range before changing.
	 */
	if (qid > nix->nb_tx_queues)
		return -EFAULT;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* smq update only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		aq->sq.smq_rr_quantum = rr_quantum;
		aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* smq update only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		/* cn10k names the quantum field smq_rr_weight */
		aq->sq.smq_rr_weight = rr_quantum;
		aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
	}

	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to set smq, rc=%d", rc);
	return rc;
}
627
/* Return TXSCH queues of level @hw_lvl to AF, one mbox request each.
 * @contig selects the contiguous vs discontiguous bitmap/reserve.
 * @above_thresh true frees only queues above the reserved threshold;
 * false frees everything available.
 * Returns 0 on success or a negative error code.
 */
int
nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
			 bool above_thresh)
{
	uint16_t avail, thresh, to_free = 0, schq;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int rc = -ENOSPC;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	thresh =
		contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
	plt_bitmap_scan_init(bmp);

	avail = nix_tm_resource_avail(nix, hw_lvl, contig);

	if (above_thresh) {
		/* Release only above threshold */
		if (avail > thresh)
			to_free = avail - thresh;
	} else {
		/* Release everything */
		to_free = avail;
	}

	/* Now release resources to AF */
	while (to_free) {
		/* Refill slab from the bitmap scan when exhausted */
		if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
			break;

		/* Pick the lowest set bit; schq = slab offset + scan pos */
		schq = bitmap_ctzll(slab);
		slab &= ~(1ULL << schq);
		schq += pos;

		/* Free to AF */
		req = mbox_alloc_msg_nix_txsch_free(mbox);
		if (req == NULL)
			return rc;
		req->flags = 0;
		req->schq_lvl = hw_lvl;
		req->schq = schq;
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("failed to release hwres %s(%u) rc %d",
				nix_tm_hwlvl2str(hw_lvl), schq, rc);
			return rc;
		}

		plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
			   schq);
		plt_bitmap_clear(bmp, schq);
		to_free--;
	}

	/* Bitmap ran out before the expected count was freed */
	if (to_free) {
		plt_err("resource inconsistency for %s(%u)",
			nix_tm_hwlvl2str(hw_lvl), contig);
		return -EFAULT;
	}
	return 0;
}
692
/* Free the HW schq held by @node. When the discontiguous pool for the
 * node's level is below its reserved count, the schq is parked back
 * in the local bitmap instead of being released to AF.
 * Returns 0 on success, -ENOSPC on mbox alloc failure, or mbox error.
 */
int
nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint16_t avail, hw_id;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = node->hw_lvl;
	hw_id = node->hw_id;
	bmp = nix->schq_bmp[hw_lvl];
	/* Free specific HW resource */
	plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
		   node);

	avail = nix_tm_resource_avail(nix, hw_lvl, false);
	/* Always for now free to discontiguous queue when avail
	 * is not sufficient.
	 */
	if (nix->discontig_rsvd[hw_lvl] &&
	    avail < nix->discontig_rsvd[hw_lvl]) {
		PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
		PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
		/* Park the schq in the local pool; node becomes
		 * resource-less but AF still owns the queue.
		 */
		plt_bitmap_set(bmp, hw_id);
		node->hw_id = NIX_TM_HW_ID_INVALID;
		node->flags &= ~NIX_TM_NODE_HWRES;
		return 0;
	}

	/* Free to AF */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return rc;
	req->flags = 0;
	req->schq_lvl = node->hw_lvl;
	req->schq = hw_id;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("failed to release hwres %s(%u) rc %d",
			nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
		return rc;
	}

	/* Mark parent as dirty for reallocing it's children */
	if (node->parent)
		node->parent->child_realloc = true;

	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags &= ~NIX_TM_NODE_HWRES;
	plt_tm_dbg("Released hwres %s(%u) to af",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id);
	return 0;
}
749
750 int
751 nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
752                    enum roc_nix_tm_tree tree, bool free)
753 {
754         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
755         struct nix_tm_shaper_profile *profile;
756         struct nix_tm_node *node, *child;
757         struct nix_tm_node_list *list;
758         uint32_t profile_id;
759         int rc;
760
761         plt_tm_dbg("Delete node id %u tree %u", node_id, tree);
762
763         node = nix_tm_node_search(nix, node_id, tree);
764         if (!node)
765                 return NIX_ERR_TM_INVALID_NODE;
766
767         list = nix_tm_node_list(nix, tree);
768         /* Check for any existing children */
769         TAILQ_FOREACH(child, list, node) {
770                 if (child->parent == node)
771                         return NIX_ERR_TM_CHILD_EXISTS;
772         }
773
774         /* Remove shaper profile reference */
775         profile_id = node->shaper_profile_id;
776         profile = nix_tm_shaper_profile_search(nix, profile_id);
777
778         /* Free hw resource locally */
779         if (node->flags & NIX_TM_NODE_HWRES) {
780                 rc = nix_tm_free_node_resource(nix, node);
781                 if (rc)
782                         return rc;
783         }
784
785         if (profile)
786                 profile->ref_cnt--;
787
788         TAILQ_REMOVE(list, node, node);
789
790         plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
791                    "parent %u profile 0x%x tree %u (%p)",
792                    nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
793                    node->priority, node->weight,
794                    node->parent ? node->parent->id : UINT32_MAX,
795                    node->shaper_profile_id, tree, node);
796         /* Free only if requested */
797         if (free)
798                 nix_tm_node_free(node);
799         return 0;
800 }
801
/* Assign HW schq ids to every child of @parent on the level below.
 * Children at the parent's RR priority consume the spare contiguous
 * slot first (when RR+SP are mixed) and then discontiguous ids from
 * the level bitmap; strict-priority children take the contiguous id
 * *contig_id + priority. *contig_cnt is decremented as contiguous
 * slots are consumed. Returns 0 on success or a negative error code.
 */
static int
nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
		    uint16_t *contig_id, int *contig_cnt,
		    struct nix_tm_node_list *list)
{
	struct nix_tm_node *child;
	struct plt_bitmap *bmp;
	uint8_t child_hw_lvl;
	int spare_schq = -1;
	uint32_t pos = 0;
	uint64_t slab;
	uint16_t schq;

	child_hw_lvl = parent->hw_lvl - 1;
	bmp = nix->schq_bmp[child_hw_lvl];
	plt_bitmap_scan_init(bmp);
	slab = 0;

	/* Save spare schq if it is case of RR + SP */
	if (parent->rr_prio != 0xf && *contig_cnt > 1)
		spare_schq = *contig_id + parent->rr_prio;

	TAILQ_FOREACH(child, list, node) {
		if (!child->parent)
			continue;
		if (child->parent->id != parent->id)
			continue;

		/* Resource never expected to be present */
		if (child->flags & NIX_TM_NODE_HWRES) {
			plt_err("Resource exists for child (%s)%u, id %u (%p)",
				nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
				child->id, child);
			return -EFAULT;
		}

		/* Refill slab from the discontiguous bitmap scan */
		if (!slab)
			plt_bitmap_scan(bmp, &pos, &slab);

		if (child->priority == parent->rr_prio && spare_schq != -1) {
			/* Use spare schq first if present */
			schq = spare_schq;
			spare_schq = -1;
			*contig_cnt = *contig_cnt - 1;

		} else if (child->priority == parent->rr_prio) {
			/* Assign a discontiguous queue */
			if (!slab) {
				plt_err("Schq not found for Child %u "
					"lvl %u (%p)",
					child->id, child->lvl, child);
				return -ENOENT;
			}

			schq = bitmap_ctzll(slab);
			slab &= ~(1ULL << schq);
			schq += pos;
			plt_bitmap_clear(bmp, schq);
		} else {
			/* Assign a contiguous queue */
			schq = *contig_id + child->priority;
			*contig_cnt = *contig_cnt - 1;
		}

		plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
			   nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
			   child->id, child);

		/* Record the assignment on the child */
		child->hw_id = schq;
		child->parent_hw_id = parent->hw_id;
		child->flags |= NIX_TM_NODE_HWRES;
	}

	return 0;
}
877
/* Assign HW schedule queue (schq) ids to every node of the given tree.
 *
 * Walks parents top-down (TL1 towards leaves); for each parent whose
 * children are marked for reallocation, a run of contiguous schqs is
 * carved from the per-level contiguous bitmap and handed to
 * nix_tm_assign_hw_id(), which distributes them (plus discontiguous
 * spares) to the children. Finally the root node (and, when TL1 is not
 * accessible, a TL1 parent id as well) is picked from the discontiguous
 * bitmap.
 *
 * Returns 0 on success, -ENOENT when the bitmaps hold too few queues,
 * -EFAULT on inconsistent tree state, -EIO when no root resource exists.
 */
int
nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *parent, *root = NULL;
	struct plt_bitmap *bmp, *bmp_contig;
	struct nix_tm_node_list *list;
	uint8_t child_hw_lvl, hw_lvl;
	uint16_t contig_id, j;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int cnt, rc;

	list = nix_tm_node_list(nix, tree);
	/* Walk from TL1 to TL4 parents */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != hw_lvl)
				continue;

			/* Remember root for future */
			if (parent->hw_lvl == nix->tm_root_lvl)
				root = parent;

			if (!parent->child_realloc) {
				/* Skip when parent is not dirty */
				if (nix_tm_child_res_valid(list, parent))
					continue;
				plt_err("Parent not dirty but invalid "
					"child res parent id %u(lvl %u)",
					parent->id, parent->lvl);
				return -EFAULT;
			}

			bmp_contig = nix->schq_contig_bmp[child_hw_lvl];

			/* Prealloc contiguous indices for a parent */
			contig_id = NIX_TM_MAX_HW_TXSCHQ;
			cnt = (int)parent->max_prio + 1;
			if (cnt > 0) {
				plt_bitmap_scan_init(bmp_contig);
				if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
					plt_err("Contig schq not found");
					return -ENOENT;
				}
				/* Scan yields a 64-bit slab; the first set
				 * bit within it is the lowest free id.
				 */
				contig_id = pos + bitmap_ctzll(slab);

				/* Check if we have enough */
				for (j = contig_id; j < contig_id + cnt; j++) {
					if (!plt_bitmap_get(bmp_contig, j))
						break;
				}

				if (j != contig_id + cnt) {
					plt_err("Contig schq not sufficient");
					return -ENOENT;
				}

				for (j = contig_id; j < contig_id + cnt; j++)
					plt_bitmap_clear(bmp_contig, j);
			}

			/* Assign hw id to all children */
			rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
						 list);
			/* Every preallocated contiguous queue must have been
			 * consumed; a leftover count indicates a logic error.
			 */
			if (cnt || rc) {
				plt_err("Unexpected err, contig res alloc, "
					"parent %u, of %s, rc=%d, cnt=%d",
					parent->id, nix_tm_hwlvl2str(hw_lvl),
					rc, cnt);
				return -EFAULT;
			}

			/* Clear the dirty bit as children's
			 * resources are reallocated.
			 */
			parent->child_realloc = false;
		}
	}

	/* Root is always expected to be there */
	if (!root)
		return -EFAULT;

	if (root->flags & NIX_TM_NODE_HWRES)
		return 0;

	/* Process root node */
	bmp = nix->schq_bmp[nix->tm_root_lvl];
	plt_bitmap_scan_init(bmp);
	if (!plt_bitmap_scan(bmp, &pos, &slab)) {
		plt_err("Resource not allocated for root");
		return -EIO;
	}

	root->hw_id = pos + bitmap_ctzll(slab);
	root->flags |= NIX_TM_NODE_HWRES;
	plt_bitmap_clear(bmp, root->hw_id);

	/* Get TL1 id as well when root is not TL1 */
	if (!nix_tm_have_tl1_access(nix)) {
		bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];

		plt_bitmap_scan_init(bmp);
		if (!plt_bitmap_scan(bmp, &pos, &slab)) {
			plt_err("Resource not found for TL1");
			return -EIO;
		}
		root->parent_hw_id = pos + bitmap_ctzll(slab);
		plt_bitmap_clear(bmp, root->parent_hw_id);
	}

	plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
		   nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);

	return 0;
}
995
996 void
997 nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
998 {
999         uint8_t lvl;
1000         uint16_t i;
1001
1002         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1003                 for (i = 0; i < rsp->schq[lvl]; i++)
1004                         plt_bitmap_set(nix->schq_bmp[lvl],
1005                                        rsp->schq_list[lvl][i]);
1006
1007                 for (i = 0; i < rsp->schq_contig[lvl]; i++)
1008                         plt_bitmap_set(nix->schq_contig_bmp[lvl],
1009                                        rsp->schq_contig_list[lvl][i]);
1010         }
1011 }
1012
/* Allocate transmit schedule queues from AF for the given tree.
 *
 * The per-level requirement (contiguous and discontiguous counts) is
 * estimated first; any contiguous resources already held are released,
 * since continuity with a new allocation cannot be guaranteed. Mailbox
 * alloc requests are then issued in a loop, each capped at
 * MAX_TXSCHQ_PER_FUNC queues per level, until the full requirement is
 * satisfied. On any failure all resources acquired so far are released.
 *
 * Returns 0 on success (including "nothing to allocate"), negative
 * errno on failure.
 */
int
nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
{
	uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t schq[NIX_TXSCH_LVL_CNT];
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl, i;
	bool pend;
	int rc;

	memset(schq, 0, sizeof(schq));
	memset(schq_contig, 0, sizeof(schq_contig));

	/* Estimate requirement */
	rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
	if (!rc)
		return 0;

	/* Release existing contiguous resources when realloc requested
	 * as there is no way to guarantee continuity of old with new.
	 */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		if (schq_contig[hw_lvl])
			nix_tm_release_resources(nix, hw_lvl, true, false);
	}

	/* Alloc as needed */
	do {
		pend = false;
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (!req) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		mbox_memcpy(req->schq, schq, sizeof(req->schq));
		mbox_memcpy(req->schq_contig, schq_contig,
			    sizeof(req->schq_contig));

		/* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
		 * So split alloc to multiple requests.
		 */
		for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
			if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq[i] = MAX_TXSCHQ_PER_FUNC;
			schq[i] -= req->schq[i];

			if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
			schq_contig[i] -= req->schq_contig[i];

			/* Anything left over needs another mbox round */
			if (schq[i] || schq_contig[i])
				pend = true;
		}

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto alloc_err;

		/* Record the granted schqs into the per-level bitmaps */
		nix_tm_copy_rsp_to_nix(nix, rsp);
	} while (pend);

	/* rsp is valid here: the loop body executed at least once and any
	 * mbox failure jumps to alloc_err before reaching this point.
	 */
	nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
	return 0;
alloc_err:
	/* Best-effort rollback of everything acquired so far */
	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		if (nix_tm_release_resources(nix, i, true, false))
			plt_err("Failed to release contig resources of "
				"lvl %d on error",
				i);
		if (nix_tm_release_resources(nix, i, false, false))
			plt_err("Failed to release discontig resources of "
				"lvl %d on error",
				i);
	}
	return rc;
}
1091
1092 int
1093 nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
1094 {
1095         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1096         uint32_t nonleaf_id = nix->nb_tx_queues;
1097         struct nix_tm_node *node = NULL;
1098         uint8_t leaf_lvl, lvl, lvl_end;
1099         uint32_t parent, i;
1100         int rc = 0;
1101
1102         /* Add ROOT, SCH1, SCH2, SCH3, [SCH4]  nodes */
1103         parent = ROC_NIX_TM_NODE_ID_INVALID;
1104         /* With TL1 access we have an extra level */
1105         lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
1106                                                        ROC_TM_LVL_SCH3);
1107
1108         for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1109                 rc = -ENOMEM;
1110                 node = nix_tm_node_alloc();
1111                 if (!node)
1112                         goto error;
1113
1114                 node->id = nonleaf_id;
1115                 node->parent_id = parent;
1116                 node->priority = 0;
1117                 node->weight = NIX_TM_DFLT_RR_WT;
1118                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1119                 node->lvl = lvl;
1120                 node->tree = ROC_NIX_TM_DEFAULT;
1121
1122                 rc = nix_tm_node_add(roc_nix, node);
1123                 if (rc)
1124                         goto error;
1125                 parent = nonleaf_id;
1126                 nonleaf_id++;
1127         }
1128
1129         parent = nonleaf_id - 1;
1130         leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1131                                                         ROC_TM_LVL_SCH4);
1132
1133         /* Add leaf nodes */
1134         for (i = 0; i < nix->nb_tx_queues; i++) {
1135                 rc = -ENOMEM;
1136                 node = nix_tm_node_alloc();
1137                 if (!node)
1138                         goto error;
1139
1140                 node->id = i;
1141                 node->parent_id = parent;
1142                 node->priority = 0;
1143                 node->weight = NIX_TM_DFLT_RR_WT;
1144                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1145                 node->lvl = leaf_lvl;
1146                 node->tree = ROC_NIX_TM_DEFAULT;
1147
1148                 rc = nix_tm_node_add(roc_nix, node);
1149                 if (rc)
1150                         goto error;
1151         }
1152
1153         return 0;
1154 error:
1155         nix_tm_node_free(node);
1156         return rc;
1157 }
1158
1159 int
1160 nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
1161 {
1162         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1163         uint32_t nonleaf_id = nix->nb_tx_queues;
1164         struct nix_tm_node *node = NULL;
1165         uint8_t leaf_lvl, lvl, lvl_end;
1166         uint32_t parent, i;
1167         int rc = 0;
1168
1169         /* Add ROOT, SCH1, SCH2 nodes */
1170         parent = ROC_NIX_TM_NODE_ID_INVALID;
1171         lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
1172                                                        ROC_TM_LVL_SCH2);
1173
1174         for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1175                 rc = -ENOMEM;
1176                 node = nix_tm_node_alloc();
1177                 if (!node)
1178                         goto error;
1179
1180                 node->id = nonleaf_id;
1181                 node->parent_id = parent;
1182                 node->priority = 0;
1183                 node->weight = NIX_TM_DFLT_RR_WT;
1184                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1185                 node->lvl = lvl;
1186                 node->tree = ROC_NIX_TM_RLIMIT;
1187
1188                 rc = nix_tm_node_add(roc_nix, node);
1189                 if (rc)
1190                         goto error;
1191                 parent = nonleaf_id;
1192                 nonleaf_id++;
1193         }
1194
1195         /* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
1196         lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);
1197
1198         /* Add per queue SMQ nodes i.e SCH4 / SCH3 */
1199         for (i = 0; i < nix->nb_tx_queues; i++) {
1200                 rc = -ENOMEM;
1201                 node = nix_tm_node_alloc();
1202                 if (!node)
1203                         goto error;
1204
1205                 node->id = nonleaf_id + i;
1206                 node->parent_id = parent;
1207                 node->priority = 0;
1208                 node->weight = NIX_TM_DFLT_RR_WT;
1209                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1210                 node->lvl = lvl;
1211                 node->tree = ROC_NIX_TM_RLIMIT;
1212
1213                 rc = nix_tm_node_add(roc_nix, node);
1214                 if (rc)
1215                         goto error;
1216         }
1217
1218         parent = nonleaf_id;
1219         leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1220                                                         ROC_TM_LVL_SCH4);
1221
1222         /* Add leaf nodes */
1223         for (i = 0; i < nix->nb_tx_queues; i++) {
1224                 rc = -ENOMEM;
1225                 node = nix_tm_node_alloc();
1226                 if (!node)
1227                         goto error;
1228
1229                 node->id = i;
1230                 node->parent_id = parent;
1231                 node->priority = 0;
1232                 node->weight = NIX_TM_DFLT_RR_WT;
1233                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1234                 node->lvl = leaf_lvl;
1235                 node->tree = ROC_NIX_TM_RLIMIT;
1236
1237                 rc = nix_tm_node_add(roc_nix, node);
1238                 if (rc)
1239                         goto error;
1240         }
1241
1242         return 0;
1243 error:
1244         nix_tm_node_free(node);
1245         return rc;
1246 }
1247
1248 int
1249 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
1250 {
1251         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1252         struct nix_tm_shaper_profile *profile;
1253         struct nix_tm_node *node, *next_node;
1254         struct nix_tm_node_list *list;
1255         enum roc_nix_tm_tree tree;
1256         uint32_t profile_id;
1257         int rc = 0;
1258
1259         for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
1260                 if (!(tree_mask & BIT(tree)))
1261                         continue;
1262
1263                 plt_tm_dbg("Freeing resources of tree %u", tree);
1264
1265                 list = nix_tm_node_list(nix, tree);
1266                 next_node = TAILQ_FIRST(list);
1267                 while (next_node) {
1268                         node = next_node;
1269                         next_node = TAILQ_NEXT(node, node);
1270
1271                         if (!nix_tm_is_leaf(nix, node->lvl) &&
1272                             node->flags & NIX_TM_NODE_HWRES) {
1273                                 /* Clear xoff in path for flush to succeed */
1274                                 rc = nix_tm_clear_path_xoff(nix, node);
1275                                 if (rc)
1276                                         return rc;
1277                                 rc = nix_tm_free_node_resource(nix, node);
1278                                 if (rc)
1279                                         return rc;
1280                         }
1281                 }
1282
1283                 /* Leave software elements if needed */
1284                 if (hw_only)
1285                         continue;
1286
1287                 next_node = TAILQ_FIRST(list);
1288                 while (next_node) {
1289                         node = next_node;
1290                         next_node = TAILQ_NEXT(node, node);
1291
1292                         plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
1293                                    node->id, node);
1294
1295                         profile_id = node->shaper_profile_id;
1296                         profile = nix_tm_shaper_profile_search(nix, profile_id);
1297                         if (profile)
1298                                 profile->ref_cnt--;
1299
1300                         TAILQ_REMOVE(list, node, node);
1301                         nix_tm_node_free(node);
1302                 }
1303         }
1304         return rc;
1305 }
1306
/* One-time TM state initialization for a NIX LF.
 *
 * Initializes the per-tree node lists and the shaper profile list, then
 * allocates a single memory chunk backing two bitmaps per HW scheduling
 * level (discontiguous + contiguous schq tracking). Also selects the
 * root level: TL1 for PFs (which have TL1 access), TL2 otherwise.
 *
 * Returns 0 on success, -ENOMEM/-EIO on allocation or bitmap init
 * failure; partially built state is torn down via nix_tm_conf_fini().
 */
int
nix_tm_conf_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t bmp_sz, hw_lvl;
	void *bmp_mem;
	int rc, i;

	/* The opaque ROC-level storage must fit the private structs */
	PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
	PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
			  ROC_NIX_TM_SHAPER_PROFILE_SZ);

	nix->tm_flags = 0;
	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
		TAILQ_INIT(&nix->trees[i]);

	TAILQ_INIT(&nix->shaper_profile_list);
	nix->tm_rate_min = 1E9; /* 1Gbps */

	rc = -ENOMEM;
	/* One allocation carved into 2 bitmaps per level below */
	bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
	bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
	if (!bmp_mem)
		return rc;
	nix->schq_bmp_mem = bmp_mem;

	/* Init contiguous and discontiguous bitmap per lvl */
	rc = -EIO;
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		/* Bitmap for discontiguous resource */
		nix->schq_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);

		/* Bitmap for contiguous resource */
		nix->schq_contig_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_contig_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
	}

	/* Disable TL1 Static Priority when VF's are enabled
	 * as otherwise VF's TL2 reallocation will be needed
	 * runtime to support a specific topology of PF.
	 */
	if (nix->pci_dev->max_vfs)
		nix->tm_flags |= NIX_TM_TL1_NO_SP;

	/* TL1 access is only for PF's */
	if (roc_nix_is_pf(roc_nix)) {
		nix->tm_flags |= NIX_TM_TL1_ACCESS;
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
	} else {
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
	}

	return 0;
exit:
	nix_tm_conf_fini(roc_nix);
	return rc;
}
1373
1374 void
1375 nix_tm_conf_fini(struct roc_nix *roc_nix)
1376 {
1377         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1378         uint16_t hw_lvl;
1379
1380         for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1381                 plt_bitmap_free(nix->schq_bmp[hw_lvl]);
1382                 plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
1383         }
1384         plt_free(nix->schq_bmp_mem);
1385 }