common/cnxk: fix null pointer dereferences
drivers/common/cnxk/roc_nix_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

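/* Return the position of the first set bit in a bitmap scan slab.
 * __builtin_ctzll() is undefined for 0, hence the guard; callers are
 * expected to pass a non-empty slab from plt_bitmap_scan().
 */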
static inline int
bitmap_ctzll(uint64_t slab)
{
        if (slab == 0)
                return 0;

        return __builtin_ctzll(slab);
}

void
nix_tm_clear_shaper_profiles(struct nix *nix)
{
        struct nix_tm_shaper_profile *shaper_profile, *tmp;
        struct nix_tm_shaper_profile_list *list;

        list = &nix->shaper_profile_list;
        PLT_TAILQ_FOREACH_SAFE(shaper_profile, list, shaper, tmp) {
                if (shaper_profile->ref_cnt)
                        plt_warn("Shaper profile %u has non-zero references",
                                 shaper_profile->id);
                TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
                nix_tm_shaper_profile_free(shaper_profile);
        }
}

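/* Push a node's topology, schedule and shaping register settings to the
 * AF over the txschq config mailbox. Without TL1 access, a default TL1
 * config is sent first when configuring a TL2 node, apparently so that
 * the TL2 node has a valid TL1 parent configuration.
 */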
static int
nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
{
        uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
        uint64_t regval[MAX_REGS_PER_MBOX_MSG];
        struct nix_tm_shaper_profile *profile;
        uint64_t reg[MAX_REGS_PER_MBOX_MSG];
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req;
        int rc = -EFAULT;
        uint32_t hw_lvl;
        uint8_t k = 0;

        memset(regval, 0, sizeof(regval));
        memset(regval_mask, 0, sizeof(regval_mask));

        profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
        hw_lvl = node->hw_lvl;

        /* Need this trigger to configure TL1 */
        if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
                /* Prepare default conf for TL1 */
                req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                if (!req) {
                        rc = -ENOSPC;
                        goto error;
                }
                req->lvl = NIX_TXSCH_LVL_TL1;

                k = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,
                                            req->regval);
                req->num_regs = k;
                rc = mbox_process(mbox);
                if (rc)
                        goto error;
        }

        /* Prepare topology config */
        k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);

        /* Prepare schedule config */
        k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);

        /* Prepare shaping config */
        k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);

        if (!k)
                return 0;

        /* Copy and send config mbox */
        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
        if (!req) {
                rc = -ENOSPC;
                goto error;
        }
        req->lvl = hw_lvl;
        req->num_regs = k;

        mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
        mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
        mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

        rc = mbox_process(mbox);
        if (rc)
                goto error;

        return 0;
error:
        plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
        return rc;
}

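/* Program registers for all nodes of a tree, level by level from the
 * root. BP capability is granted to exactly one node at the link config
 * level, and only for a PF or LBK VF without an SDP link.
 */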
int
nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
{
        struct nix_tm_node_list *list;
        bool is_pf_or_lbk = false;
        struct nix_tm_node *node;
        bool skip_bp = false;
        uint32_t hw_lvl;
        int rc = 0;

        list = nix_tm_node_list(nix, tree);

        if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
                is_pf_or_lbk = true;

        for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
                TAILQ_FOREACH(node, list, node) {
                        if (node->hw_lvl != hw_lvl)
                                continue;

                        /* Only one TL3/TL2 link config should have BP
                         * enabled per channel, and only for a PF or LBK VF.
                         */
                        node->bp_capa = 0;
                        if (is_pf_or_lbk && !skip_bp &&
                            node->hw_lvl == nix->tm_link_cfg_lvl) {
                                node->bp_capa = 1;
                                skip_bp = true;
                        }

                        rc = nix_tm_node_reg_conf(nix, node);
                        if (rc)
                                goto exit;
                }
        }
exit:
        return rc;
}

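/* Refresh per-parent RR bookkeeping (rr_prio, rr_num, max_prio) for a
 * tree, and locally release the HW resources of children whose parent
 * is marked for child reallocation.
 */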
int
nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
{
        struct nix_tm_node *child, *parent;
        struct nix_tm_node_list *list;
        uint32_t rr_prio, max_prio;
        uint32_t rr_num = 0;

        list = nix_tm_node_list(nix, tree);

        /* Release all node HW resources locally if the parent is
         * marked dirty and the resource exists.
         */
        TAILQ_FOREACH(child, list, node) {
                /* Release resource only if parent direct hierarchy changed */
                if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
                    child->parent->child_realloc) {
                        nix_tm_free_node_resource(nix, child);
                }
                child->max_prio = UINT32_MAX;
        }

        TAILQ_FOREACH(parent, list, node) {
                /* Count the group of children at the same priority,
                 * i.e. the RR group.
                 */
                rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
                                         &max_prio);

                /* Assume multiple RR groups are not configured, as per
                 * capability.
                 */
                parent->rr_prio = rr_prio;
                parent->rr_num = rr_num;
                parent->max_prio = max_prio;
        }

        return 0;
}

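/* Return 1 if the given tree already has a node at the root level,
 * 0 otherwise.
 */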
static int
nix_tm_root_node_get(struct nix *nix, int tree)
{
        struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
        struct nix_tm_node *tm_node;

        TAILQ_FOREACH(tm_node, list, node) {
                if (tm_node->hw_lvl == nix->tm_root_lvl)
                        return 1;
        }

        return 0;
}

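/* Validate a new node against the tree (level, parent, priority, shaper
 * profile, weight) and link it in. No HW resource is allocated here;
 * hw_id stays invalid until resources are assigned.
 */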
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile *profile;
        uint32_t node_id, parent_id, lvl;
        struct nix_tm_node *parent_node;
        uint32_t priority, profile_id;
        uint8_t hw_lvl, exp_next_lvl;
        enum roc_nix_tm_tree tree;
        int rc;

        node_id = node->id;
        priority = node->priority;
        parent_id = node->parent_id;
        profile_id = node->shaper_profile_id;
        lvl = node->lvl;
        tree = node->tree;

        plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
                   "parent %u profile 0x%x tree %u",
                   nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
                   priority, node->weight, parent_id, profile_id, tree);

        if (tree >= ROC_NIX_TM_TREE_MAX)
                return NIX_ERR_PARAM;

        /* Translate SW level IDs to NIX HW level IDs */
        hw_lvl = nix_tm_lvl2nix(nix, lvl);
        if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
                return NIX_ERR_TM_INVALID_LVL;

        /* Leaf nodes must all have the same priority */
        if (nix_tm_is_leaf(nix, lvl) && priority != 0)
                return NIX_ERR_TM_INVALID_PRIO;

        parent_node = nix_tm_node_search(nix, parent_id, tree);

        if (node_id < nix->nb_tx_queues)
                exp_next_lvl = NIX_TXSCH_LVL_SMQ;
        else
                exp_next_lvl = hw_lvl + 1;

        /* Reject if the parent is missing or not at the expected level */
        if (hw_lvl != nix->tm_root_lvl &&
            (!parent_node || parent_node->hw_lvl != exp_next_lvl))
                return NIX_ERR_TM_INVALID_PARENT;

        /* Check if a node already exists */
        if (nix_tm_node_search(nix, node_id, tree))
                return NIX_ERR_TM_NODE_EXISTS;

        /* Check if root node exists */
        if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
                return NIX_ERR_TM_NODE_EXISTS;

        profile = nix_tm_shaper_profile_search(nix, profile_id);
        if (!nix_tm_is_leaf(nix, lvl)) {
                /* Check if shaper profile exists for non leaf node */
                if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
                        return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

                /* Packet mode in the profile should match that of the
                 * TM node.
                 */
                if (profile && profile->pkt_mode != node->pkt_mode)
                        return NIX_ERR_TM_PKT_MODE_MISMATCH;
        }

        /* Check for a second DWRR group among siblings or holes in
         * priority.
         */
        rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
        if (rc)
                return rc;

        if (node->weight > roc_nix_tm_max_sched_wt_get())
                return NIX_ERR_TM_WEIGHT_EXCEED;

        /* Maintain minimum weight */
        if (!node->weight)
                node->weight = 1;

        node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
        node->rr_prio = 0xF;
        node->max_prio = UINT32_MAX;
        node->hw_id = NIX_TM_HW_ID_INVALID;
        node->flags = 0;

        if (profile)
                profile->ref_cnt++;

        node->parent = parent_node;
        if (parent_node)
                parent_node->child_realloc = true;
        node->parent_hw_id = NIX_TM_HW_ID_INVALID;

        TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
        plt_tm_dbg("Added node %s lvl %u id %u (%p)",
                   nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
        return 0;
}

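/* Walk from a node up to the root and clear SW XOFF on every disabled
 * node that holds a HW resource, so that a subsequent flush can make
 * progress.
 */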
int
nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req;
        struct nix_tm_node *p;
        int rc;

        /* Enable nodes in path for flush to succeed */
        if (!nix_tm_is_leaf(nix, node->lvl))
                p = node;
        else
                p = node->parent;
        while (p) {
                if (!(p->flags & NIX_TM_NODE_ENABLED) &&
                    (p->flags & NIX_TM_NODE_HWRES)) {
                        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                        if (!req)
                                return -ENOSPC;
                        req->lvl = p->hw_lvl;
                        req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
                                                            req->regval);
                        rc = mbox_process(mbox);
                        if (rc)
                                return rc;

                        p->flags |= NIX_TM_NODE_ENABLED;
                }
                p = p->parent;
        }

        return 0;
}

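/* Enable or disable backpressure (bit 13 of the TL3/TL2 link config
 * register) on the TX link for all BP-capable nodes, batching up to
 * MAX_REGS_PER_MBOX_MSG register writes per mailbox message.
 */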
int
nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        enum roc_nix_tm_tree tree = nix->tm_tree;
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req = NULL;
        struct nix_tm_node_list *list;
        struct nix_tm_node *node;
        uint8_t k = 0;
        uint16_t link;
        int rc = 0;

        list = nix_tm_node_list(nix, tree);
        link = nix->tx_link;

        TAILQ_FOREACH(node, list, node) {
                if (node->hw_lvl != nix->tm_link_cfg_lvl)
                        continue;

                if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
                        continue;

                if (!req) {
                        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                        if (!req) {
                                rc = -ENOSPC;
                                goto err;
                        }
                        req->lvl = nix->tm_link_cfg_lvl;
                        k = 0;
                }

                req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
                req->regval[k] = enable ? BIT_ULL(13) : 0;
                req->regval_mask[k] = ~BIT_ULL(13);
                k++;

                if (k >= MAX_REGS_PER_MBOX_MSG) {
                        req->num_regs = k;
                        rc = mbox_process(mbox);
                        if (rc)
                                goto err;
                        req = NULL;
                }
        }

        if (req) {
                req->num_regs = k;
                rc = mbox_process(mbox);
                if (rc)
                        goto err;
        }

        return 0;
err:
        plt_err("Failed to %s bp on link %u, rc=%d(%s)",
                enable ? "enable" : "disable", link, rc, roc_error_msg_get(rc));
        return rc;
}

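/* Read back the link config of all BP-capable nodes and report
 * backpressure as enabled only if it is enabled on every one of them.
 */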
int
nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_txschq_config *req = NULL, *rsp;
        enum roc_nix_tm_tree tree = nix->tm_tree;
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_tm_node_list *list;
        struct nix_tm_node *node;
        bool found = false;
        uint8_t enable = 1;
        uint8_t k = 0, i;
        uint16_t link;
        int rc = 0;

        list = nix_tm_node_list(nix, tree);
        link = nix->tx_link;

        TAILQ_FOREACH(node, list, node) {
                if (node->hw_lvl != nix->tm_link_cfg_lvl)
                        continue;

                if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
                        continue;

                found = true;
                if (!req) {
                        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                        if (!req) {
                                rc = -ENOSPC;
                                goto err;
                        }
                        req->read = 1;
                        req->lvl = nix->tm_link_cfg_lvl;
                        k = 0;
                }

                req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
                k++;

                if (k >= MAX_REGS_PER_MBOX_MSG) {
                        req->num_regs = k;
                        rc = mbox_process_msg(mbox, (void **)&rsp);
                        if (rc || rsp->num_regs != k)
                                goto err;
                        req = NULL;

                        /* Report as enabled only if enabled on all */
                        for (i = 0; i < k; i++)
                                enable &= !!(rsp->regval[i] & BIT_ULL(13));
                }
        }

        if (req) {
                req->num_regs = k;
                rc = mbox_process_msg(mbox, (void **)&rsp);
                if (rc || rsp->num_regs != k)
                        goto err;
                /* Report as enabled only if enabled on all */
                for (i = 0; i < k; i++)
                        enable &= !!(rsp->regval[i] & BIT_ULL(13));
        }

        *is_enabled = found ? !!enable : false;
        return 0;
err:
        plt_err("Failed to get bp status on link %u, rc=%d(%s)", link, rc,
                roc_error_msg_get(rc));
        return rc;
}

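/* Set or clear SMQ XOFF/FLUSH (bits 50 and 49 of NIX_AF_SMQX_CFG).
 * SW XOFF is cleared along the node's path first so that a flush can
 * complete.
 */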
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req;
        uint16_t smq;
        int rc;

        smq = node->hw_id;
        plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
                   enable ? "enable" : "disable");

        rc = nix_tm_clear_path_xoff(nix, node);
        if (rc)
                return rc;

        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
        if (!req)
                return -ENOSPC;
        req->lvl = NIX_TXSCH_LVL_SMQ;
        req->num_regs = 1;

        req->reg[0] = NIX_AF_SMQX_CFG(smq);
        req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
        req->regval_mask[0] =
                enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

        return mbox_process(mbox);
}

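/* Resolve an SQ to its parent SMQ id and RR quantum, clear SMQ XOFF and
 * mark the leaf enabled. Fails with -EIO when the SQ has no valid leaf
 * node or the parent SMQ has no HW id yet.
 */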
int
nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
                     uint16_t *smq)
{
        struct nix_tm_node *node;
        int rc;

        node = nix_tm_node_search(nix, sq, nix->tm_tree);

        /* Check if we found a valid leaf node */
        if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
            node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
                return -EIO;
        }

        /* Get SMQ Id of leaf node's parent */
        *smq = node->parent->hw_id;
        *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

        rc = nix_tm_smq_xoff(nix, node->parent, false);
        if (rc)
                return rc;
        node->flags |= NIX_TM_NODE_ENABLED;
        return 0;
}

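/* Poll NIX_LF_SQ_OP_STATUS until the SQ reaches a quiescent state (at
 * most one SQB in use, head == tail, and all SQB buffers back in the
 * aura). The timeout is derived from the worst-case drain time at the
 * minimum shaper rate across all TX queues.
 */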
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
        struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
        uint16_t sqb_cnt, head_off, tail_off;
        uint64_t wdata, val, prev;
        uint16_t qid = sq->qid;
        int64_t *regaddr;
        uint64_t timeout; /* in tens of usec */

        /* Wait for enough time based on shaper min rate */
        timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
        /* Wait for the worst case of this SQ being the last priority,
         * where all other SQs must drain out on their own.
         */
        timeout = timeout * nix->nb_tx_queues;
        timeout = timeout / nix->tm_rate_min;
        if (!timeout)
                timeout = 10000;

        wdata = ((uint64_t)qid << 32);
        regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
        val = roc_atomic64_add_nosync(wdata, regaddr);

        /* Spin for multiple iterations as "sq->fc_cache_pkts" may still
         * have space to send packets even though fc_mem is disabled.
         */
        while (true) {
                prev = val;
                plt_delay_us(10);
                val = roc_atomic64_add_nosync(wdata, regaddr);
                /* Continue on error */
                if (val & BIT_ULL(63))
                        continue;

                if (prev != val)
                        continue;

                sqb_cnt = val & 0xFFFF;
                head_off = (val >> 20) & 0x3F;
                tail_off = (val >> 28) & 0x3F;

                /* SQ reached quiescent state */
                if (sqb_cnt <= 1 && head_off == tail_off &&
                    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
                        break;
                }

                /* Timeout */
                if (!timeout)
                        goto exit;
                timeout--;
        }

        return 0;
exit:
        roc_nix_tm_dump(sq->roc_nix);
        roc_nix_queues_ctx_dump(sq->roc_nix);
        return -EFAULT;
}

/* Flush and disable a TX queue and its parent SMQ */
int
nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
{
        struct roc_nix *roc_nix = sq->roc_nix;
        struct nix_tm_node *node, *sibling;
        struct nix_tm_node_list *list;
        enum roc_nix_tm_tree tree;
        struct mbox *mbox;
        struct nix *nix;
        uint16_t qid;
        int rc;

        nix = roc_nix_to_nix_priv(roc_nix);

        /* Need not do anything if tree is in disabled state */
        if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return 0;

        mbox = (&nix->dev)->mbox;
        qid = sq->qid;

        tree = nix->tm_tree;
        list = nix_tm_node_list(nix, tree);

        /* Find the node for this SQ */
        node = nix_tm_node_search(nix, qid, tree);
        if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
                plt_err("Invalid node/state for sq %u", qid);
                return -EFAULT;
        }

        /* Enable CGX RXTX to drain pkts */
        if (!roc_nix->io_enabled) {
                /* Though this enables both RX MCAM entries and the CGX
                 * link, we assume all the RX queues were stopped well
                 * before this point.
                 */
                mbox_alloc_msg_nix_lf_start_rx(mbox);
                rc = mbox_process(mbox);
                if (rc) {
                        plt_err("cgx start failed, rc=%d", rc);
                        return rc;
                }
        }

        /* Disable backpressure */
        rc = nix_tm_bp_config_set(roc_nix, false);
        if (rc) {
                plt_err("Failed to disable backpressure for flush, rc=%d", rc);
                return rc;
        }

        /* Disable SMQ XOFF in case it was enabled earlier */
        rc = nix_tm_smq_xoff(nix, node->parent, false);
        if (rc) {
                plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
                        rc);
                return rc;
        }

        /* As per HRM, to disable an SQ, all other SQs that feed into
         * the same SMQ must be paused before the SMQ flush.
         */
        TAILQ_FOREACH(sibling, list, node) {
                if (sibling->parent != node->parent)
                        continue;
                if (!(sibling->flags & NIX_TM_NODE_ENABLED))
                        continue;

                qid = sibling->id;
                sq = nix->sqs[qid];
                if (!sq)
                        continue;

                rc = roc_nix_tm_sq_aura_fc(sq, false);
                if (rc) {
                        plt_err("Failed to disable sqb aura fc, rc=%d", rc);
                        goto cleanup;
                }

                /* Wait for sq entries to be flushed */
                rc = roc_nix_tm_sq_flush_spin(sq);
                if (rc) {
                        plt_err("Failed to drain sq %u, rc=%d", sq->qid, rc);
                        return rc;
                }
        }

        node->flags &= ~NIX_TM_NODE_ENABLED;

        /* Disable and flush */
        rc = nix_tm_smq_xoff(nix, node->parent, true);
        if (rc) {
                plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
                        rc);
                goto cleanup;
        }
cleanup:
        /* Restore cgx state */
        if (!roc_nix->io_enabled) {
                mbox_alloc_msg_nix_lf_stop_rx(mbox);
                rc |= mbox_process(mbox);
        }

        return rc;
}

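/* Counterpart of nix_tm_sq_flush_pre(): re-enable the SMQ and the SQB
 * aura FC of sibling SQs after a flush, and restore backpressure when
 * RX pause is configured.
 */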
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
        struct roc_nix *roc_nix = sq->roc_nix;
        struct nix_tm_node *node, *sibling;
        struct nix_tm_node_list *list;
        enum roc_nix_tm_tree tree;
        struct roc_nix_sq *s_sq;
        bool once = false;
        uint16_t qid, s_qid;
        struct nix *nix;
        int rc;

        nix = roc_nix_to_nix_priv(roc_nix);

        /* Need not do anything if tree is in disabled state */
        if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return 0;

        qid = sq->qid;
        tree = nix->tm_tree;
        list = nix_tm_node_list(nix, tree);

        /* Find the node for this SQ */
        node = nix_tm_node_search(nix, qid, tree);
        if (!node) {
                plt_err("Invalid node for sq %u", qid);
                return -EFAULT;
        }

        /* Enable all the siblings back */
        TAILQ_FOREACH(sibling, list, node) {
                if (sibling->parent != node->parent)
                        continue;

                if (sibling->id == qid)
                        continue;

                if (!(sibling->flags & NIX_TM_NODE_ENABLED))
                        continue;

                s_qid = sibling->id;
                s_sq = nix->sqs[s_qid];
                if (!s_sq)
                        continue;

                if (!once) {
                        /* Enable back if any SQ is still present */
                        rc = nix_tm_smq_xoff(nix, node->parent, false);
                        if (rc) {
                                plt_err("Failed to enable smq %u, rc=%d",
                                        node->parent->hw_id, rc);
                                return rc;
                        }
                        once = true;
                }

                rc = roc_nix_tm_sq_aura_fc(s_sq, true);
                if (rc) {
                        plt_err("Failed to enable sqb aura fc, rc=%d", rc);
                        return rc;
                }
        }

        if (!nix->rx_pause)
                return 0;

        /* Restore backpressure */
        rc = nix_tm_bp_config_set(roc_nix, true);
        if (rc) {
                plt_err("Failed to restore backpressure, rc=%d", rc);
                return rc;
        }

        return 0;
}

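/* Program an SQ's SMQ mapping and RR quantum via the AQ. With
 * rr_quantum_only set, only the RR quantum/weight field is updated.
 * The cn9k and cn10k paths differ in the AQ request type and the name
 * of the RR field (smq_rr_quantum vs smq_rr_weight).
 */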
int
nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
                     bool rr_quantum_only)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        uint16_t qid = node->id, smq;
        uint64_t rr_quantum;
        int rc;

        smq = node->parent->hw_id;
        rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

        if (rr_quantum_only)
                plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
                           rr_quantum);
        else
                plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
                           qid, smq, rr_quantum);

        if (qid > nix->nb_tx_queues)
                return -EFAULT;

        if (roc_model_is_cn9k()) {
                struct nix_aq_enq_req *aq;

                aq = mbox_alloc_msg_nix_aq_enq(mbox);
                if (!aq)
                        return -ENOSPC;

                aq->qidx = qid;
                aq->ctype = NIX_AQ_CTYPE_SQ;
                aq->op = NIX_AQ_INSTOP_WRITE;

                /* smq update only when needed */
                if (!rr_quantum_only) {
                        aq->sq.smq = smq;
                        aq->sq_mask.smq = ~aq->sq_mask.smq;
                }
                aq->sq.smq_rr_quantum = rr_quantum;
                aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
        } else {
                struct nix_cn10k_aq_enq_req *aq;

                aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
                if (!aq)
                        return -ENOSPC;

                aq->qidx = qid;
                aq->ctype = NIX_AQ_CTYPE_SQ;
                aq->op = NIX_AQ_INSTOP_WRITE;

                /* smq update only when needed */
                if (!rr_quantum_only) {
                        aq->sq.smq = smq;
                        aq->sq_mask.smq = ~aq->sq_mask.smq;
                }
                aq->sq.smq_rr_weight = rr_quantum;
                aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
        }

        rc = mbox_process(mbox);
        if (rc)
                plt_err("Failed to set smq, rc=%d", rc);
        return rc;
}

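/* Return schqs of a HW level back to the AF, scanning either the
 * contiguous or the discontiguous bitmap. With above_thresh, only the
 * count above the reserved threshold is released; otherwise everything
 * is freed.
 */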
int
nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
                         bool above_thresh)
{
        uint16_t avail, thresh, to_free = 0, schq;
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txsch_free_req *req;
        struct plt_bitmap *bmp;
        uint64_t slab = 0;
        uint32_t pos = 0;
        int rc = -ENOSPC;

        bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
        thresh =
                contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
        plt_bitmap_scan_init(bmp);

        avail = nix_tm_resource_avail(nix, hw_lvl, contig);

        if (above_thresh) {
                /* Release only above threshold */
                if (avail > thresh)
                        to_free = avail - thresh;
        } else {
                /* Release everything */
                to_free = avail;
        }

        /* Now release resources to AF */
        while (to_free) {
                if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
                        break;

                schq = bitmap_ctzll(slab);
                slab &= ~(1ULL << schq);
                schq += pos;

                /* Free to AF */
                req = mbox_alloc_msg_nix_txsch_free(mbox);
                if (req == NULL)
                        return rc;
                req->flags = 0;
                req->schq_lvl = hw_lvl;
                req->schq = schq;
                rc = mbox_process(mbox);
                if (rc) {
                        plt_err("failed to release hwres %s(%u) rc %d",
                                nix_tm_hwlvl2str(hw_lvl), schq, rc);
                        return rc;
                }

                plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
                           schq);
                plt_bitmap_clear(bmp, schq);
                to_free--;
        }

        if (to_free) {
                plt_err("resource inconsistency for %s(%u)",
                        nix_tm_hwlvl2str(hw_lvl), contig);
                return -EFAULT;
        }
        return 0;
}

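/* Free one node's HW resource. If the discontiguous pool has dropped
 * below its reserved count, the schq is returned to the local bitmap
 * instead of the AF. The parent is marked dirty so its children are
 * reallocated on the next assignment pass.
 */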
int
nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txsch_free_req *req;
        struct plt_bitmap *bmp;
        uint16_t avail, hw_id;
        uint8_t hw_lvl;
        int rc = -ENOSPC;

        hw_lvl = node->hw_lvl;
        hw_id = node->hw_id;
        bmp = nix->schq_bmp[hw_lvl];
        /* Free specific HW resource */
        plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
                   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
                   node);

        avail = nix_tm_resource_avail(nix, hw_lvl, false);
        /* For now, always free to the discontiguous pool when its
         * availability is below the reserved count.
         */
        if (nix->discontig_rsvd[hw_lvl] &&
            avail < nix->discontig_rsvd[hw_lvl]) {
                PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
                PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
                plt_bitmap_set(bmp, hw_id);
                node->hw_id = NIX_TM_HW_ID_INVALID;
                node->flags &= ~NIX_TM_NODE_HWRES;
                return 0;
        }

        /* Free to AF */
        req = mbox_alloc_msg_nix_txsch_free(mbox);
        if (req == NULL)
                return rc;
        req->flags = 0;
        req->schq_lvl = node->hw_lvl;
        req->schq = hw_id;
        rc = mbox_process(mbox);
        if (rc) {
                plt_err("failed to release hwres %s(%u) rc %d",
                        nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
                return rc;
        }

        /* Mark parent as dirty for reallocating its children */
        if (node->parent)
                node->parent->child_realloc = true;

        node->hw_id = NIX_TM_HW_ID_INVALID;
        node->flags &= ~NIX_TM_NODE_HWRES;
        plt_tm_dbg("Released hwres %s(%u) to af",
                   nix_tm_hwlvl2str(node->hw_lvl), hw_id);
        return 0;
}

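/* Unlink a node from its tree after checking it has no children, free
 * its HW resource and drop its shaper profile reference. The node
 * memory is freed only when 'free' is set.
 */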
int
nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
                   enum roc_nix_tm_tree tree, bool free)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile *profile;
        struct nix_tm_node *node, *child;
        struct nix_tm_node_list *list;
        uint32_t profile_id;
        int rc;

        plt_tm_dbg("Delete node id %u tree %u", node_id, tree);

        node = nix_tm_node_search(nix, node_id, tree);
        if (!node)
                return NIX_ERR_TM_INVALID_NODE;

        list = nix_tm_node_list(nix, tree);
        /* Check for any existing children */
        TAILQ_FOREACH(child, list, node) {
                if (child->parent == node)
                        return NIX_ERR_TM_CHILD_EXISTS;
        }

        /* Remove shaper profile reference */
        profile_id = node->shaper_profile_id;
        profile = nix_tm_shaper_profile_search(nix, profile_id);

        /* Free hw resource locally */
        if (node->flags & NIX_TM_NODE_HWRES) {
                rc = nix_tm_free_node_resource(nix, node);
                if (rc)
                        return rc;
        }

        if (profile)
                profile->ref_cnt--;

        TAILQ_REMOVE(list, node, node);

        plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
                   "parent %u profile 0x%x tree %u (%p)",
                   nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
                   node->priority, node->weight,
                   node->parent ? node->parent->id : UINT32_MAX,
                   node->shaper_profile_id, tree, node);
        /* Free only if requested */
        if (free)
                nix_tm_node_free(node);
        return 0;
}

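/* Hand out HW schq ids to all children of a parent. SP children take
 * slots from the preallocated contiguous range, indexed by priority;
 * RR children (priority == rr_prio) consume the spare contiguous slot
 * first and then fall back to the discontiguous bitmap.
 */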
static int
nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
                    uint16_t *contig_id, int *contig_cnt,
                    struct nix_tm_node_list *list)
{
        struct nix_tm_node *child;
        struct plt_bitmap *bmp;
        uint8_t child_hw_lvl;
        int spare_schq = -1;
        uint32_t pos = 0;
        uint64_t slab;
        uint16_t schq;

        child_hw_lvl = parent->hw_lvl - 1;
        bmp = nix->schq_bmp[child_hw_lvl];
        plt_bitmap_scan_init(bmp);
        slab = 0;

        /* Save the spare schq for the RR + SP case */
        if (parent->rr_prio != 0xf && *contig_cnt > 1)
                spare_schq = *contig_id + parent->rr_prio;

        TAILQ_FOREACH(child, list, node) {
                if (!child->parent)
                        continue;
                if (child->parent->id != parent->id)
                        continue;

                /* Resource never expected to be present */
                if (child->flags & NIX_TM_NODE_HWRES) {
                        plt_err("Resource exists for child (%s)%u, id %u (%p)",
                                nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
                                child->id, child);
                        return -EFAULT;
                }

                if (!slab)
                        plt_bitmap_scan(bmp, &pos, &slab);

                if (child->priority == parent->rr_prio && spare_schq != -1) {
                        /* Use spare schq first if present */
                        schq = spare_schq;
                        spare_schq = -1;
                        *contig_cnt = *contig_cnt - 1;

                } else if (child->priority == parent->rr_prio) {
                        /* Assign a discontiguous queue */
                        if (!slab) {
                                plt_err("Schq not found for Child %u "
                                        "lvl %u (%p)",
                                        child->id, child->lvl, child);
                                return -ENOENT;
                        }

                        schq = bitmap_ctzll(slab);
                        slab &= ~(1ULL << schq);
                        schq += pos;
                        plt_bitmap_clear(bmp, schq);
                } else {
                        /* Assign a contiguous queue */
                        schq = *contig_id + child->priority;
                        *contig_cnt = *contig_cnt - 1;
                }

                plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
                           nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
                           child->id, child);

                child->hw_id = schq;
                child->parent_hw_id = parent->hw_id;
                child->flags |= NIX_TM_NODE_HWRES;
        }

        return 0;
}

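/* Walk parents from TL1 downwards and reassign children's HW ids for
 * every parent marked dirty, preallocating max_prio + 1 contiguous
 * schqs per parent. Finally assign the root's HW id and, without TL1
 * access, the TL1 parent id of the root.
 */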
int
nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
{
        struct nix_tm_node *parent, *root = NULL;
        struct plt_bitmap *bmp, *bmp_contig;
        struct nix_tm_node_list *list;
        uint8_t child_hw_lvl, hw_lvl;
        uint16_t contig_id, j;
        uint64_t slab = 0;
        uint32_t pos = 0;
        int cnt, rc;

        list = nix_tm_node_list(nix, tree);
        /* Walk from TL1 to TL4 parents */
        for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
                TAILQ_FOREACH(parent, list, node) {
                        child_hw_lvl = parent->hw_lvl - 1;
                        if (parent->hw_lvl != hw_lvl)
                                continue;

                        /* Remember root for future */
                        if (parent->hw_lvl == nix->tm_root_lvl)
                                root = parent;

                        if (!parent->child_realloc) {
                                /* Skip when parent is not dirty */
                                if (nix_tm_child_res_valid(list, parent))
                                        continue;
                                plt_err("Parent not dirty but invalid "
                                        "child res parent id %u(lvl %u)",
                                        parent->id, parent->lvl);
                                return -EFAULT;
                        }

                        bmp_contig = nix->schq_contig_bmp[child_hw_lvl];

                        /* Prealloc contiguous indices for a parent */
                        contig_id = NIX_TM_MAX_HW_TXSCHQ;
                        cnt = (int)parent->max_prio + 1;
                        if (cnt > 0) {
                                plt_bitmap_scan_init(bmp_contig);
                                if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
                                        plt_err("Contig schq not found");
                                        return -ENOENT;
                                }
                                contig_id = pos + bitmap_ctzll(slab);

                                /* Check if we have enough */
                                for (j = contig_id; j < contig_id + cnt; j++) {
                                        if (!plt_bitmap_get(bmp_contig, j))
                                                break;
                                }

                                if (j != contig_id + cnt) {
                                        plt_err("Contig schq not sufficient");
                                        return -ENOENT;
                                }

                                for (j = contig_id; j < contig_id + cnt; j++)
                                        plt_bitmap_clear(bmp_contig, j);
                        }

                        /* Assign hw id to all children */
                        rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
                                                 list);
                        if (cnt || rc) {
                                plt_err("Unexpected err, contig res alloc, "
                                        "parent %u, of %s, rc=%d, cnt=%d",
                                        parent->id, nix_tm_hwlvl2str(hw_lvl),
                                        rc, cnt);
                                return -EFAULT;
                        }

                        /* Clear the dirty bit as children's
                         * resources are reallocated.
                         */
                        parent->child_realloc = false;
                }
        }

        /* Root is always expected to be there */
        if (!root)
                return -EFAULT;

        if (root->flags & NIX_TM_NODE_HWRES)
                return 0;

        /* Process root node */
        bmp = nix->schq_bmp[nix->tm_root_lvl];
        plt_bitmap_scan_init(bmp);
        if (!plt_bitmap_scan(bmp, &pos, &slab)) {
                plt_err("Resource not allocated for root");
                return -EIO;
        }

        root->hw_id = pos + bitmap_ctzll(slab);
        root->flags |= NIX_TM_NODE_HWRES;
        plt_bitmap_clear(bmp, root->hw_id);

        /* Get TL1 id as well when root is not TL1 */
        if (!nix_tm_have_tl1_access(nix)) {
                bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];

                plt_bitmap_scan_init(bmp);
                if (!plt_bitmap_scan(bmp, &pos, &slab)) {
                        plt_err("Resource not found for TL1");
                        return -EIO;
                }
                root->parent_hw_id = pos + bitmap_ctzll(slab);
                plt_bitmap_clear(bmp, root->parent_hw_id);
        }

        plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
                   nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);

        return 0;
}

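/* Record the schqs granted by the AF in the per-level discontiguous and
 * contiguous bitmaps.
 */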
void
nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
{
        uint8_t lvl;
        uint16_t i;

        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                for (i = 0; i < rsp->schq[lvl]; i++)
                        plt_bitmap_set(nix->schq_bmp[lvl],
                                       rsp->schq_list[lvl][i]);

                for (i = 0; i < rsp->schq_contig[lvl]; i++)
                        plt_bitmap_set(nix->schq_contig_bmp[lvl],
                                       rsp->schq_contig_list[lvl][i]);
        }
}

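/* Estimate the schq requirement of a tree and allocate it from the AF.
 * Requests are capped at MAX_TXSCHQ_PER_FUNC per level, so the alloc is
 * split across multiple mailbox messages. Existing contiguous resources
 * are released first as continuity with a new allocation cannot be
 * guaranteed.
 */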
int
nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
{
        uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
        struct mbox *mbox = (&nix->dev)->mbox;
        uint16_t schq[NIX_TXSCH_LVL_CNT];
        struct nix_txsch_alloc_req *req;
        struct nix_txsch_alloc_rsp *rsp;
        uint8_t hw_lvl, i;
        bool pend;
        int rc;

        memset(schq, 0, sizeof(schq));
        memset(schq_contig, 0, sizeof(schq_contig));

        /* Estimate requirement */
        rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
        if (!rc)
                return 0;

        /* Release existing contiguous resources when realloc requested
         * as there is no way to guarantee continuity of old with new.
         */
        for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
                if (schq_contig[hw_lvl])
                        nix_tm_release_resources(nix, hw_lvl, true, false);
        }

        /* Alloc as needed */
        do {
                pend = false;
                req = mbox_alloc_msg_nix_txsch_alloc(mbox);
                if (!req) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                mbox_memcpy(req->schq, schq, sizeof(req->schq));
                mbox_memcpy(req->schq_contig, schq_contig,
                            sizeof(req->schq_contig));

                /* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
                 * So split alloc to multiple requests.
                 */
                for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
                        if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
                                req->schq[i] = MAX_TXSCHQ_PER_FUNC;
                        schq[i] -= req->schq[i];

                        if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
                                req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
                        schq_contig[i] -= req->schq_contig[i];

                        if (schq[i] || schq_contig[i])
                                pend = true;
                }

                rc = mbox_process_msg(mbox, (void *)&rsp);
                if (rc)
                        goto alloc_err;

                nix_tm_copy_rsp_to_nix(nix, rsp);
        } while (pend);

        nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
        return 0;
alloc_err:
        for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
                if (nix_tm_release_resources(nix, i, true, false))
                        plt_err("Failed to release contig resources of "
                                "lvl %d on error",
                                i);
                if (nix_tm_release_resources(nix, i, false, false))
                        plt_err("Failed to release discontig resources of "
                                "lvl %d on error",
                                i);
        }
        return rc;
}

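/* Build the default tree in software: a chain of non-leaf nodes from
 * ROOT down to SCH4 (SCH3 without TL1 access), with all leaf nodes, one
 * per TX queue, sharing the last non-leaf node as parent.
 */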
int
nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint32_t nonleaf_id = nix->nb_tx_queues;
        struct nix_tm_node *node = NULL;
        uint8_t leaf_lvl, lvl, lvl_end;
        uint32_t parent, i;
        int rc = 0;

        /* Add ROOT, SCH1, SCH2, SCH3, [SCH4] nodes */
        parent = ROC_NIX_TM_NODE_ID_INVALID;
        /* With TL1 access we have an extra level */
        lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
                                                       ROC_TM_LVL_SCH3);

        for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
                rc = -ENOMEM;
                node = nix_tm_node_alloc();
                if (!node)
                        goto error;

                node->id = nonleaf_id;
                node->parent_id = parent;
                node->priority = 0;
                node->weight = NIX_TM_DFLT_RR_WT;
                node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
                node->lvl = lvl;
                node->tree = ROC_NIX_TM_DEFAULT;

                rc = nix_tm_node_add(roc_nix, node);
                if (rc)
                        goto error;
                parent = nonleaf_id;
                nonleaf_id++;
        }

        parent = nonleaf_id - 1;
        leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
                                                        ROC_TM_LVL_SCH4);

        /* Add leaf nodes */
        for (i = 0; i < nix->nb_tx_queues; i++) {
                rc = -ENOMEM;
                node = nix_tm_node_alloc();
                if (!node)
                        goto error;

                node->id = i;
                node->parent_id = parent;
                node->priority = 0;
                node->weight = NIX_TM_DFLT_RR_WT;
                node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
                node->lvl = leaf_lvl;
                node->tree = ROC_NIX_TM_DEFAULT;

                rc = nix_tm_node_add(roc_nix, node);
                if (rc)
                        goto error;
        }

        return 0;
error:
        nix_tm_node_free(node);
        return rc;
}

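/* Build the rate-limit tree: a single non-leaf chain down to SCH2/SCH3,
 * then one SMQ-level node per TX queue (presumably so each queue can be
 * rate-limited independently), and finally one leaf per queue.
 */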
int
roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint32_t nonleaf_id = nix->nb_tx_queues;
        struct nix_tm_node *node = NULL;
        uint8_t leaf_lvl, lvl, lvl_end;
        uint32_t parent, i;
        int rc = 0;

        /* Add ROOT, SCH1, SCH2 nodes */
        parent = ROC_NIX_TM_NODE_ID_INVALID;
        lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
                                                       ROC_TM_LVL_SCH2);

        for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
                rc = -ENOMEM;
                node = nix_tm_node_alloc();
                if (!node)
                        goto error;

                node->id = nonleaf_id;
                node->parent_id = parent;
                node->priority = 0;
                node->weight = NIX_TM_DFLT_RR_WT;
                node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
                node->lvl = lvl;
                node->tree = ROC_NIX_TM_RLIMIT;

                rc = nix_tm_node_add(roc_nix, node);
                if (rc)
                        goto error;
                parent = nonleaf_id;
                nonleaf_id++;
        }

        /* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
        lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);

        /* Add per queue SMQ nodes i.e SCH4 / SCH3 */
        for (i = 0; i < nix->nb_tx_queues; i++) {
                rc = -ENOMEM;
                node = nix_tm_node_alloc();
                if (!node)
                        goto error;

                node->id = nonleaf_id + i;
                node->parent_id = parent;
                node->priority = 0;
                node->weight = NIX_TM_DFLT_RR_WT;
                node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
                node->lvl = lvl;
                node->tree = ROC_NIX_TM_RLIMIT;

                rc = nix_tm_node_add(roc_nix, node);
                if (rc)
                        goto error;
        }

        parent = nonleaf_id;
        leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
                                                        ROC_TM_LVL_SCH4);

        /* Add leaf nodes */
        for (i = 0; i < nix->nb_tx_queues; i++) {
                rc = -ENOMEM;
                node = nix_tm_node_alloc();
                if (!node)
                        goto error;

                node->id = i;
                node->parent_id = parent + i;
                node->priority = 0;
                node->weight = NIX_TM_DFLT_RR_WT;
                node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
                node->lvl = leaf_lvl;
                node->tree = ROC_NIX_TM_RLIMIT;

                rc = nix_tm_node_add(roc_nix, node);
                if (rc)
                        goto error;
        }

        return 0;
error:
        nix_tm_node_free(node);
        return rc;
}

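/* Free HW resources of every tree selected in tree_mask, clearing SW
 * XOFF along each path first. Unless hw_only is set, shaper profile
 * references are dropped and the software nodes freed as well.
 */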
int
nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile *profile;
        struct nix_tm_node *node, *next_node;
        struct nix_tm_node_list *list;
        enum roc_nix_tm_tree tree;
        uint32_t profile_id;
        int rc = 0;

        for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
                if (!(tree_mask & BIT(tree)))
                        continue;

                plt_tm_dbg("Freeing resources of tree %u", tree);

                list = nix_tm_node_list(nix, tree);
                next_node = TAILQ_FIRST(list);
                while (next_node) {
                        node = next_node;
                        next_node = TAILQ_NEXT(node, node);

                        if (!nix_tm_is_leaf(nix, node->lvl) &&
                            node->flags & NIX_TM_NODE_HWRES) {
                                /* Clear xoff in path for flush to succeed */
                                rc = nix_tm_clear_path_xoff(nix, node);
                                if (rc)
                                        return rc;
                                rc = nix_tm_free_node_resource(nix, node);
                                if (rc)
                                        return rc;
                        }
                }

                /* Leave software elements if needed */
                if (hw_only)
                        continue;

                next_node = TAILQ_FIRST(list);
                while (next_node) {
                        node = next_node;
                        next_node = TAILQ_NEXT(node, node);

                        plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
                                   node->id, node);

                        profile_id = node->shaper_profile_id;
                        profile = nix_tm_shaper_profile_search(nix, profile_id);
                        if (profile)
                                profile->ref_cnt--;

                        TAILQ_REMOVE(list, node, node);
                        nix_tm_node_free(node);
                }
        }
        return rc;
}

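/* One-time TM init: tree lists, shaper profile list, and a pair of
 * bitmaps (discontiguous + contiguous) per HW level carved from one
 * allocation. TL1 access and the root level depend on the PF/VF role.
 */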
int
nix_tm_conf_init(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint32_t bmp_sz, hw_lvl;
        void *bmp_mem;
        int rc, i;

        PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
        PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
                          ROC_NIX_TM_SHAPER_PROFILE_SZ);

        nix->tm_flags = 0;
        for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
                TAILQ_INIT(&nix->trees[i]);

        TAILQ_INIT(&nix->shaper_profile_list);
        nix->tm_rate_min = 1E9; /* 1Gbps */

        rc = -ENOMEM;
        bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
        bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
        if (!bmp_mem)
                return rc;
        nix->schq_bmp_mem = bmp_mem;

        /* Init contiguous and discontiguous bitmap per lvl */
        rc = -EIO;
        for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
                /* Bitmap for discontiguous resource */
                nix->schq_bmp[hw_lvl] =
                        plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
                if (!nix->schq_bmp[hw_lvl])
                        goto exit;

                bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);

                /* Bitmap for contiguous resource */
                nix->schq_contig_bmp[hw_lvl] =
                        plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
                if (!nix->schq_contig_bmp[hw_lvl])
                        goto exit;

                bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
        }

        /* Disable TL1 static priority when VFs are enabled, as otherwise
         * a VF's TL2 would need runtime reallocation to support a
         * specific PF topology.
         */
        if (nix->pci_dev->max_vfs)
                nix->tm_flags |= NIX_TM_TL1_NO_SP;

        /* TL1 access is only for PFs */
        if (roc_nix_is_pf(roc_nix)) {
                nix->tm_flags |= NIX_TM_TL1_ACCESS;
                nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
        } else {
                nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
        }

        return 0;
exit:
        nix_tm_conf_fini(roc_nix);
        return rc;
}

void
nix_tm_conf_fini(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint16_t hw_lvl;

        for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
                plt_bitmap_free(nix->schq_bmp[hw_lvl]);
                plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
        }
        plt_free(nix->schq_bmp_mem);
}