common/cnxk: support priority flow control
[dpdk.git] / drivers / common / cnxk / roc_nix_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "roc_api.h"
6 #include "roc_priv.h"
7
static inline int
bitmap_ctzll(uint64_t slab)
{
	/* Index of the lowest set bit in @slab.  An all-zero slab maps
	 * to 0 by convention (callers treat that as "nothing found").
	 */
	return slab ? __builtin_ctzll(slab) : 0;
}
16
/* Remove and free every shaper profile attached to this NIX LF.
 * Used during full TM teardown, so profiles still referenced by nodes
 * are freed anyway (with a warning).
 */
void
nix_tm_clear_shaper_profiles(struct nix *nix)
{
	struct nix_tm_shaper_profile *shaper_profile, *tmp;
	struct nix_tm_shaper_profile_list *list;

	list = &nix->shaper_profile_list;
	/* Safe iterator: each entry is unlinked and freed mid-walk */
	PLT_TAILQ_FOREACH_SAFE(shaper_profile, list, shaper, tmp) {
		/* Non-zero ref_cnt means a TM node still points here */
		if (shaper_profile->ref_cnt)
			plt_warn("Shaper profile %u has non zero references",
				 shaper_profile->id);
		TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
		nix_tm_shaper_profile_free(shaper_profile);
	}
}
32
33 static int
34 nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
35 {
36         uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
37         uint64_t regval[MAX_REGS_PER_MBOX_MSG];
38         struct nix_tm_shaper_profile *profile;
39         uint64_t reg[MAX_REGS_PER_MBOX_MSG];
40         struct mbox *mbox = (&nix->dev)->mbox;
41         struct nix_txschq_config *req;
42         int rc = -EFAULT;
43         uint32_t hw_lvl;
44         uint8_t k = 0;
45
46         memset(regval, 0, sizeof(regval));
47         memset(regval_mask, 0, sizeof(regval_mask));
48
49         profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
50         hw_lvl = node->hw_lvl;
51
52         /* Need this trigger to configure TL1 */
53         if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
54                 /* Prepare default conf for TL1 */
55                 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
56                 req->lvl = NIX_TXSCH_LVL_TL1;
57
58                 k = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,
59                                             req->regval);
60                 req->num_regs = k;
61                 rc = mbox_process(mbox);
62                 if (rc)
63                         goto error;
64         }
65
66         /* Prepare topology config */
67         k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
68
69         /* Prepare schedule config */
70         k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);
71
72         /* Prepare shaping config */
73         k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);
74
75         if (!k)
76                 return 0;
77
78         /* Copy and send config mbox */
79         req = mbox_alloc_msg_nix_txschq_cfg(mbox);
80         req->lvl = hw_lvl;
81         req->num_regs = k;
82
83         mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
84         mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
85         mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
86
87         rc = mbox_process(mbox);
88         if (rc)
89                 goto error;
90
91         return 0;
92 error:
93         plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
94         return rc;
95 }
96
/* Walk the given TM tree level by level (SMQ up to the root) and
 * program each node's TXSCHQ registers, marking backpressure capability
 * on link-config-level nodes along the way.
 */
int
nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	bool is_pf_or_lbk = false;
	struct nix_tm_node *node;
	bool skip_bp = false;
	uint32_t hw_lvl;
	int rc = 0;

	list = nix_tm_node_list(nix, tree);

	/* BP capability applies only for PF or LBK VF, never SDP links */
	if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
		is_pf_or_lbk = true;

	for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
		TAILQ_FOREACH(node, list, node) {
			if (node->hw_lvl != hw_lvl)
				continue;

			/* Only one TL3/TL2 Link config should have BP enable
			 * set per channel only for PF or lbk vf.
			 */
			/* NOTE(review): skip_bp is never set to true below,
			 * so every link-cfg-level node gets bp_capa.  With
			 * per-TC PFC that may be intended, but it leaves
			 * skip_bp dead and the comment above stale - confirm.
			 */
			node->bp_capa = 0;
			if (is_pf_or_lbk && !skip_bp &&
			    node->hw_lvl == nix->tm_link_cfg_lvl) {
				node->bp_capa = 1;
				skip_bp = false;
			}

			rc = nix_tm_node_reg_conf(nix, node);
			if (rc)
				goto exit;
		}
	}
exit:
	return rc;
}
135
136 int
137 nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
138 {
139         struct nix_tm_node *child, *parent;
140         struct nix_tm_node_list *list;
141         uint32_t rr_prio, max_prio;
142         uint32_t rr_num = 0;
143
144         list = nix_tm_node_list(nix, tree);
145
146         /* Release all the node hw resources locally
147          * if parent marked as dirty and resource exists.
148          */
149         TAILQ_FOREACH(child, list, node) {
150                 /* Release resource only if parent direct hierarchy changed */
151                 if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
152                     child->parent->child_realloc) {
153                         nix_tm_free_node_resource(nix, child);
154                 }
155                 child->max_prio = UINT32_MAX;
156         }
157
158         TAILQ_FOREACH(parent, list, node) {
159                 /* Count group of children of same priority i.e are RR */
160                 rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
161                                          &max_prio);
162
163                 /* Assuming that multiple RR groups are
164                  * not configured based on capability.
165                  */
166                 parent->rr_prio = rr_prio;
167                 parent->rr_num = rr_num;
168                 parent->max_prio = max_prio;
169         }
170
171         return 0;
172 }
173
174 static int
175 nix_tm_root_node_get(struct nix *nix, int tree)
176 {
177         struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
178         struct nix_tm_node *tm_node;
179
180         TAILQ_FOREACH(tm_node, list, node) {
181                 if (tm_node->hw_lvl == nix->tm_root_lvl)
182                         return 1;
183         }
184
185         return 0;
186 }
187
/* Validate and append a user/SW TM node to its tree.  No HW resources
 * are touched here; hw_id/parent_hw_id are left invalid until the
 * hierarchy is committed.
 *
 * @return 0 on success, NIX_ERR_TM_* / NIX_ERR_PARAM on validation
 *         failure.
 */
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	uint32_t node_id, parent_id, lvl;
	struct nix_tm_node *parent_node;
	uint32_t priority, profile_id;
	uint8_t hw_lvl, exp_next_lvl;
	enum roc_nix_tm_tree tree;
	int rc;

	node_id = node->id;
	priority = node->priority;
	parent_id = node->parent_id;
	profile_id = node->shaper_profile_id;
	lvl = node->lvl;
	tree = node->tree;

	plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u",
		   nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
		   priority, node->weight, parent_id, profile_id, tree);

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	/* Translate sw level id's to nix hw level id's */
	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
		return NIX_ERR_TM_INVALID_LVL;

	/* Leaf nodes have to be same priority */
	if (nix_tm_is_leaf(nix, lvl) && priority != 0)
		return NIX_ERR_TM_INVALID_PRIO;

	parent_node = nix_tm_node_search(nix, parent_id, tree);

	/* Leaf ids (SQs) must hang off an SMQ; anything else must hang
	 * off the next HW level up.
	 */
	if (node_id < nix->nb_tx_queues)
		exp_next_lvl = NIX_TXSCH_LVL_SMQ;
	else
		exp_next_lvl = hw_lvl + 1;

	/* Check if there is no parent node yet */
	if (hw_lvl != nix->tm_root_lvl &&
	    (!parent_node || parent_node->hw_lvl != exp_next_lvl))
		return NIX_ERR_TM_INVALID_PARENT;

	/* Check if a node already exists */
	if (nix_tm_node_search(nix, node_id, tree))
		return NIX_ERR_TM_NODE_EXISTS;

	/* Check if root node exists */
	if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
		return NIX_ERR_TM_NODE_EXISTS;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	if (!nix_tm_is_leaf(nix, lvl)) {
		/* Check if shaper profile exists for non leaf node */
		if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

		/* Packet mode in profile should match with that of tm node */
		if (profile && profile->pkt_mode != node->pkt_mode)
			return NIX_ERR_TM_PKT_MODE_MISMATCH;
	}

	/* Check if there is second DWRR already in siblings or holes in prio */
	rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
	if (rc)
		return rc;

	if (node->weight > roc_nix_tm_max_sched_wt_get())
		return NIX_ERR_TM_WEIGHT_EXCEED;

	/* Maintain minimum weight */
	if (!node->weight)
		node->weight = 1;

	/* All validations passed; initialize HW-facing fields to their
	 * "not yet allocated" values.
	 */
	node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
	node->rr_prio = 0xF;
	node->max_prio = UINT32_MAX;
	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags = 0;

	if (profile)
		profile->ref_cnt++;

	node->parent = parent_node;
	/* Mark parent dirty so its children's HW resources are
	 * reallocated on the next hierarchy commit.
	 */
	if (parent_node)
		parent_node->child_realloc = true;
	node->parent_hw_id = NIX_TM_HW_ID_INVALID;

	TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
	plt_tm_dbg("Added node %s lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
	return 0;
}
286
/* Clear SW XOFF on every disabled node along the path from @node up to
 * the root so that a subsequent flush can actually drain.
 *
 * @return 0 on success, negative error code on mbox failure.
 */
int
nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *p;
	int rc;

	/* Enable nodes in path for flush to succeed */
	if (!nix_tm_is_leaf(nix, node->lvl))
		p = node;
	else
		p = node->parent;
	while (p) {
		/* Only nodes that own HW resources but are still
		 * disabled need their XOFF cleared.
		 */
		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
		    (p->flags & NIX_TM_NODE_HWRES)) {
			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
			req->lvl = p->hw_lvl;
			/* enable=false => prepare regs that clear XOFF */
			req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
							    req->regval);
			rc = mbox_process(mbox);
			if (rc)
				return rc;

			p->flags |= NIX_TM_NODE_ENABLED;
		}
		p = p->parent;
	}

	return 0;
}
318
319 int
320 nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
321                      bool enable)
322 {
323         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
324         enum roc_nix_tm_tree tree = nix->tm_tree;
325         struct mbox *mbox = (&nix->dev)->mbox;
326         struct nix_txschq_config *req = NULL;
327         struct nix_tm_node_list *list;
328         struct nix_tm_node *sq_node;
329         struct nix_tm_node *parent;
330         struct nix_tm_node *node;
331         uint8_t k = 0;
332         uint16_t link;
333         int rc = 0;
334
335         sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
336         parent = sq_node->parent;
337         while (parent) {
338                 if (parent->lvl == ROC_TM_LVL_SCH2)
339                         break;
340
341                 parent = parent->parent;
342         }
343
344         list = nix_tm_node_list(nix, tree);
345         link = nix->tx_link;
346
347         if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
348                 rc = -EINVAL;
349                 goto err;
350         }
351
352         TAILQ_FOREACH(node, list, node) {
353                 if (node->hw_lvl != nix->tm_link_cfg_lvl)
354                         continue;
355
356                 if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
357                         continue;
358
359                 if (node->hw_id != parent->hw_id)
360                         continue;
361
362                 if (!req) {
363                         req = mbox_alloc_msg_nix_txschq_cfg(mbox);
364                         req->lvl = nix->tm_link_cfg_lvl;
365                         k = 0;
366                 }
367
368                 req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
369                 req->regval[k] = enable ? tc : 0;
370                 req->regval[k] |= enable ? BIT_ULL(13) : 0;
371                 req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
372                 k++;
373
374                 if (k >= MAX_REGS_PER_MBOX_MSG) {
375                         req->num_regs = k;
376                         rc = mbox_process(mbox);
377                         if (rc)
378                                 goto err;
379                         req = NULL;
380                 }
381         }
382
383         if (req) {
384                 req->num_regs = k;
385                 rc = mbox_process(mbox);
386                 if (rc)
387                         goto err;
388         }
389
390         parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
391         return 0;
392 err:
393         plt_err("Failed to %s bp on link %u, rc=%d(%s)",
394                 enable ? "enable" : "disable", link, rc, roc_error_msg_get(rc));
395         return rc;
396 }
397
398 int
399 nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
400 {
401         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
402         struct nix_txschq_config *req = NULL, *rsp;
403         enum roc_nix_tm_tree tree = nix->tm_tree;
404         struct mbox *mbox = (&nix->dev)->mbox;
405         struct nix_tm_node_list *list;
406         struct nix_tm_node *node;
407         bool found = false;
408         uint8_t enable = 1;
409         uint8_t k = 0, i;
410         uint16_t link;
411         int rc = 0;
412
413         list = nix_tm_node_list(nix, tree);
414         link = nix->tx_link;
415
416         TAILQ_FOREACH(node, list, node) {
417                 if (node->hw_lvl != nix->tm_link_cfg_lvl)
418                         continue;
419
420                 if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
421                         continue;
422
423                 found = true;
424                 if (!req) {
425                         req = mbox_alloc_msg_nix_txschq_cfg(mbox);
426                         req->read = 1;
427                         req->lvl = nix->tm_link_cfg_lvl;
428                         k = 0;
429                 }
430
431                 req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
432                 k++;
433
434                 if (k >= MAX_REGS_PER_MBOX_MSG) {
435                         req->num_regs = k;
436                         rc = mbox_process_msg(mbox, (void **)&rsp);
437                         if (rc || rsp->num_regs != k)
438                                 goto err;
439                         req = NULL;
440
441                         /* Report it as enabled only if enabled or all */
442                         for (i = 0; i < k; i++)
443                                 enable &= !!(rsp->regval[i] & BIT_ULL(13));
444                 }
445         }
446
447         if (req) {
448                 req->num_regs = k;
449                 rc = mbox_process_msg(mbox, (void **)&rsp);
450                 if (rc)
451                         goto err;
452                 /* Report it as enabled only if enabled or all */
453                 for (i = 0; i < k; i++)
454                         enable &= !!(rsp->regval[i] & BIT_ULL(13));
455         }
456
457         *is_enabled = found ? !!enable : false;
458         return 0;
459 err:
460         plt_err("Failed to get bp status on link %u, rc=%d(%s)", link, rc,
461                 roc_error_msg_get(rc));
462         return rc;
463 }
464
/* Set or clear XOFF/flush on @node's SMQ.  Ancestor XOFFs are cleared
 * first so a flush can drain.
 *
 * @return 0 on success, negative error code on mbox failure.
 */
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	uint16_t smq;
	int rc;

	smq = node->hw_id;
	plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
		   enable ? "enable" : "disable");

	/* Nodes along the path must be enabled for flush to succeed */
	rc = nix_tm_clear_path_xoff(nix, node);
	if (rc)
		return rc;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	/* Bits 49/50 of NIX_AF_SMQ(x)_CFG appear to be the XOFF and
	 * flush controls (per the dbg message) - confirm against HRM.
	 * enable: set both bits; disable: clear only bit 50, leaving
	 * bit 49 untouched via the mask.
	 */
	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
	req->regval_mask[0] =
		enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

	return mbox_process(mbox);
}
492
493 int
494 nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
495                      uint16_t *smq)
496 {
497         struct nix_tm_node *node;
498         int rc;
499
500         node = nix_tm_node_search(nix, sq, nix->tm_tree);
501
502         /* Check if we found a valid leaf node */
503         if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
504             node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
505                 return -EIO;
506         }
507
508         /* Get SMQ Id of leaf node's parent */
509         *smq = node->parent->hw_id;
510         *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
511
512         rc = nix_tm_smq_xoff(nix, node->parent, false);
513         if (rc)
514                 return rc;
515         node->flags |= NIX_TM_NODE_ENABLED;
516         return 0;
517 }
518
/* Spin until SQ @sq->qid reaches a quiescent state (drained), bounded
 * by a timeout derived from descriptor count, max packet length and
 * the minimum shaper rate.
 *
 * @return 0 when the SQ drained, -EFAULT on timeout (after dumping TM
 *         and queue context for debugging).
 */
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint64_t wdata, val, prev;
	uint16_t qid = sq->qid;
	int64_t *regaddr;
	uint64_t timeout; /* 10's of usec */

	/* Wait for enough time based on shaper min rate */
	timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
	/* Wait for worst case scenario of this SQ being last priority
	 * and so have to wait for all other SQ's drain out by their own.
	 */
	timeout = timeout * nix->nb_tx_queues;
	timeout = timeout / nix->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	/* Atomic read of NIX_LF_SQ_OP_STATUS; the qid being queried is
	 * carried in the upper 32 bits of the written data - assumed
	 * per-HRM encoding, confirm.
	 */
	wdata = ((uint64_t)qid << 32);
	regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(wdata, regaddr);

	/* Spin multiple iterations as "sq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled
	 */

	while (true) {
		prev = val;
		plt_delay_us(10);
		val = roc_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		/* Require two identical consecutive reads before
		 * trusting the counters below.
		 */
		if (prev != val)
			continue;

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
			break;
		}

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	roc_nix_tm_dump(sq->roc_nix);
	roc_nix_queues_ctx_dump(sq->roc_nix);
	return -EFAULT;
}
580
581 /* Flush and disable tx queue and its parent SMQ */
582 int
583 nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
584 {
585         struct roc_nix *roc_nix = sq->roc_nix;
586         struct nix_tm_node *node, *sibling;
587         struct nix_tm_node_list *list;
588         enum roc_nix_tm_tree tree;
589         struct mbox *mbox;
590         struct nix *nix;
591         uint16_t qid;
592         int rc;
593
594         nix = roc_nix_to_nix_priv(roc_nix);
595
596         /* Need not do anything if tree is in disabled state */
597         if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
598                 return 0;
599
600         mbox = (&nix->dev)->mbox;
601         qid = sq->qid;
602
603         tree = nix->tm_tree;
604         list = nix_tm_node_list(nix, tree);
605
606         /* Find the node for this SQ */
607         node = nix_tm_node_search(nix, qid, tree);
608         if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
609                 plt_err("Invalid node/state for sq %u", qid);
610                 return -EFAULT;
611         }
612
613         /* Enable CGX RXTX to drain pkts */
614         if (!roc_nix->io_enabled) {
615                 /* Though it enables both RX MCAM Entries and CGX Link
616                  * we assume all the rx queues are stopped way back.
617                  */
618                 mbox_alloc_msg_nix_lf_start_rx(mbox);
619                 rc = mbox_process(mbox);
620                 if (rc) {
621                         plt_err("cgx start failed, rc=%d", rc);
622                         return rc;
623                 }
624         }
625
626         /* Disable backpressure */
627         rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
628         if (rc) {
629                 plt_err("Failed to disable backpressure for flush, rc=%d", rc);
630                 return rc;
631         }
632
633         /* Disable smq xoff for case it was enabled earlier */
634         rc = nix_tm_smq_xoff(nix, node->parent, false);
635         if (rc) {
636                 plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
637                         rc);
638                 return rc;
639         }
640
641         /* As per HRM, to disable an SQ, all other SQ's
642          * that feed to same SMQ must be paused before SMQ flush.
643          */
644         TAILQ_FOREACH(sibling, list, node) {
645                 if (sibling->parent != node->parent)
646                         continue;
647                 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
648                         continue;
649
650                 qid = sibling->id;
651                 sq = nix->sqs[qid];
652                 if (!sq)
653                         continue;
654
655                 rc = roc_nix_tm_sq_aura_fc(sq, false);
656                 if (rc) {
657                         plt_err("Failed to disable sqb aura fc, rc=%d", rc);
658                         goto cleanup;
659                 }
660
661                 /* Wait for sq entries to be flushed */
662                 rc = roc_nix_tm_sq_flush_spin(sq);
663                 if (rc) {
664                         plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
665                         return rc;
666                 }
667         }
668
669         node->flags &= ~NIX_TM_NODE_ENABLED;
670
671         /* Disable and flush */
672         rc = nix_tm_smq_xoff(nix, node->parent, true);
673         if (rc) {
674                 plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
675                         rc);
676                 goto cleanup;
677         }
678 cleanup:
679         /* Restore cgx state */
680         if (!roc_nix->io_enabled) {
681                 mbox_alloc_msg_nix_lf_stop_rx(mbox);
682                 rc |= mbox_process(mbox);
683         }
684
685         return rc;
686 }
687
/* Undo nix_tm_sq_flush_pre() after the SQ context is torn down:
 * re-enable the shared SMQ if any sibling SQ remains, restore sibling
 * SQB aura flow control, and re-enable backpressure when rx_pause is
 * configured.
 *
 * @return 0 on success, negative error code otherwise.
 */
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct roc_nix_sq *s_sq;
	bool once = false;
	uint16_t qid, s_qid;
	struct nix *nix;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Need not do anything if tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	qid = sq->qid;
	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node) {
		plt_err("Invalid node for sq %u", qid);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;

		/* The flushed SQ itself stays disabled */
		if (sibling->id == qid)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_qid = sibling->id;
		s_sq = nix->sqs[s_qid];
		if (!s_sq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_tm_smq_xoff(nix, node->parent, false);
			if (rc) {
				plt_err("Failed to enable smq %u, rc=%d",
					node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = roc_nix_tm_sq_aura_fc(s_sq, true);
		if (rc) {
			plt_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	/* BP is only restored when rx pause is configured */
	if (!nix->rx_pause)
		return 0;

	/* Restore backpressure */
	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
	if (rc) {
		plt_err("Failed to restore backpressure, rc=%d", rc);
		return rc;
	}

	return 0;
}
764
/* Write SQ context fields that bind an SQ to its parent SMQ and set
 * its RR quantum/weight.  With @rr_quantum_only only the quantum is
 * updated; otherwise the SMQ binding is (re)written too.  CN9K and
 * CN10K use different AQ request layouts and field names.
 *
 * @return 0 on success, negative error code otherwise.
 */
int
nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
		     bool rr_quantum_only)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid = node->id, smq;
	uint64_t rr_quantum;
	int rc;

	smq = node->parent->hw_id;
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	if (rr_quantum_only)
		plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
			   rr_quantum);
	else
		plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
			   qid, smq, rr_quantum);

	/* NOTE(review): valid SQ ids appear to be 0..nb_tx_queues-1
	 * (see the leaf check in nix_tm_node_add), so this bound looks
	 * like it wants >= rather than > - confirm.
	 */
	if (qid > nix->nb_tx_queues)
		return -EFAULT;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* smq update only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		aq->sq.smq_rr_quantum = rr_quantum;
		aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* smq update only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		/* CN10K names the same concept smq_rr_weight */
		aq->sq.smq_rr_weight = rr_quantum;
		aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
	}

	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to set smq, rc=%d", rc);
	return rc;
}
830
/* Release TXSCH queues of @hw_lvl from the local bitmap back to AF.
 * With @above_thresh only queues exceeding the reserved threshold are
 * freed; otherwise everything is freed.  @contig selects between the
 * contiguous and discontiguous bitmaps.
 *
 * @return 0 on success, negative error code on mbox failure or on a
 *         bitmap/counter inconsistency.
 */
int
nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
			 bool above_thresh)
{
	uint16_t avail, thresh, to_free = 0, schq;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int rc = -ENOSPC;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	thresh =
		contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
	plt_bitmap_scan_init(bmp);

	avail = nix_tm_resource_avail(nix, hw_lvl, contig);

	if (above_thresh) {
		/* Release only above threshold */
		if (avail > thresh)
			to_free = avail - thresh;
	} else {
		/* Release everything */
		to_free = avail;
	}

	/* Now release resources to AF */
	while (to_free) {
		/* Refill the slab from the bitmap scan when exhausted;
		 * a failed scan means the bitmap ran dry early.
		 */
		if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
			break;

		/* Pop the lowest set bit; schq = slab offset + scan pos */
		schq = bitmap_ctzll(slab);
		slab &= ~(1ULL << schq);
		schq += pos;

		/* Free to AF */
		req = mbox_alloc_msg_nix_txsch_free(mbox);
		if (req == NULL)
			return rc;
		req->flags = 0;
		req->schq_lvl = hw_lvl;
		req->schq = schq;
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("failed to release hwres %s(%u) rc %d",
				nix_tm_hwlvl2str(hw_lvl), schq, rc);
			return rc;
		}

		plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
			   schq);
		plt_bitmap_clear(bmp, schq);
		to_free--;
	}

	/* Bitmap ran out before the computed count was freed */
	if (to_free) {
		plt_err("resource inconsistency for %s(%u)",
			nix_tm_hwlvl2str(hw_lvl), contig);
		return -EFAULT;
	}
	return 0;
}
895
/* Release the HW schq held by @node.
 *
 * When the per-level discontiguous reserve is configured and currently
 * under-filled, the schq is parked in the local discontiguous bitmap for
 * reuse instead of being returned to AF. Otherwise a NIX_TXSCH_FREE
 * mailbox request gives it back to AF and the parent is marked dirty so
 * its children get reallocated on the next resource assignment.
 *
 * Returns 0 on success, -ENOSPC if the mbox message cannot be allocated,
 * or the negative error from mbox_process().
 */
int
nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint16_t avail, hw_id;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = node->hw_lvl;
	hw_id = node->hw_id;
	bmp = nix->schq_bmp[hw_lvl];
	/* Free specific HW resource */
	plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
		   node);

	avail = nix_tm_resource_avail(nix, hw_lvl, false);
	/* Always for now free to discontiguous queue when avail
	 * is not sufficient.
	 */
	if (nix->discontig_rsvd[hw_lvl] &&
	    avail < nix->discontig_rsvd[hw_lvl]) {
		PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
		PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
		/* Park the schq in the local bitmap for later reuse */
		plt_bitmap_set(bmp, hw_id);
		node->hw_id = NIX_TM_HW_ID_INVALID;
		node->flags &= ~NIX_TM_NODE_HWRES;
		return 0;
	}

	/* Free to AF */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return rc;
	req->flags = 0;
	req->schq_lvl = node->hw_lvl;
	req->schq = hw_id;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("failed to release hwres %s(%u) rc %d",
			nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
		return rc;
	}

	/* Mark parent as dirty for reallocing it's children */
	if (node->parent)
		node->parent->child_realloc = true;

	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags &= ~NIX_TM_NODE_HWRES;
	plt_tm_dbg("Released hwres %s(%u) to af",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id);
	return 0;
}
952
953 int
954 nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
955                    enum roc_nix_tm_tree tree, bool free)
956 {
957         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
958         struct nix_tm_shaper_profile *profile;
959         struct nix_tm_node *node, *child;
960         struct nix_tm_node_list *list;
961         uint32_t profile_id;
962         int rc;
963
964         plt_tm_dbg("Delete node id %u tree %u", node_id, tree);
965
966         node = nix_tm_node_search(nix, node_id, tree);
967         if (!node)
968                 return NIX_ERR_TM_INVALID_NODE;
969
970         list = nix_tm_node_list(nix, tree);
971         /* Check for any existing children */
972         TAILQ_FOREACH(child, list, node) {
973                 if (child->parent == node)
974                         return NIX_ERR_TM_CHILD_EXISTS;
975         }
976
977         /* Remove shaper profile reference */
978         profile_id = node->shaper_profile_id;
979         profile = nix_tm_shaper_profile_search(nix, profile_id);
980
981         /* Free hw resource locally */
982         if (node->flags & NIX_TM_NODE_HWRES) {
983                 rc = nix_tm_free_node_resource(nix, node);
984                 if (rc)
985                         return rc;
986         }
987
988         if (profile)
989                 profile->ref_cnt--;
990
991         TAILQ_REMOVE(list, node, node);
992
993         plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
994                    "parent %u profile 0x%x tree %u (%p)",
995                    nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
996                    node->priority, node->weight,
997                    node->parent ? node->parent->id : UINT32_MAX,
998                    node->shaper_profile_id, tree, node);
999         /* Free only if requested */
1000         if (free)
1001                 nix_tm_node_free(node);
1002         return 0;
1003 }
1004
/* Assign HW schq ids to all children of @parent (one HW level below it).
 *
 * Children whose priority differs from the parent's RR priority get a
 * contiguous id (*contig_id + priority); children at the RR priority get
 * either the "spare" contiguous slot (when the parent mixes RR with SP
 * and more than one contiguous slot was preallocated) or a discontiguous
 * schq scanned from the per-level bitmap. *contig_cnt is decremented for
 * every contiguous slot consumed so the caller can verify all
 * preallocated ids were used.
 *
 * Returns 0 on success, -EFAULT if a child unexpectedly already holds a
 * resource, -ENOENT when no discontiguous schq is available.
 */
static int
nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
		    uint16_t *contig_id, int *contig_cnt,
		    struct nix_tm_node_list *list)
{
	struct nix_tm_node *child;
	struct plt_bitmap *bmp;
	uint8_t child_hw_lvl;
	int spare_schq = -1;
	uint32_t pos = 0;
	uint64_t slab;
	uint16_t schq;

	child_hw_lvl = parent->hw_lvl - 1;
	bmp = nix->schq_bmp[child_hw_lvl];
	plt_bitmap_scan_init(bmp);
	slab = 0;

	/* Save spare schq if it is case of RR + SP */
	if (parent->rr_prio != 0xf && *contig_cnt > 1)
		spare_schq = *contig_id + parent->rr_prio;

	TAILQ_FOREACH(child, list, node) {
		/* Only direct children of @parent are of interest */
		if (!child->parent)
			continue;
		if (child->parent->id != parent->id)
			continue;

		/* Resource never expected to be present */
		if (child->flags & NIX_TM_NODE_HWRES) {
			plt_err("Resource exists for child (%s)%u, id %u (%p)",
				nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
				child->id, child);
			return -EFAULT;
		}

		/* Refill the scan slab lazily, only when exhausted */
		if (!slab) {
			if (!plt_bitmap_scan(bmp, &pos, &slab))
				return -ENOENT;
		}

		if (child->priority == parent->rr_prio && spare_schq != -1) {
			/* Use spare schq first if present */
			schq = spare_schq;
			spare_schq = -1;
			*contig_cnt = *contig_cnt - 1;

		} else if (child->priority == parent->rr_prio) {
			/* Assign a discontiguous queue */
			if (!slab) {
				plt_err("Schq not found for Child %u "
					"lvl %u (%p)",
					child->id, child->lvl, child);
				return -ENOENT;
			}

			schq = bitmap_ctzll(slab);
			slab &= ~(1ULL << schq);
			schq += pos;
			plt_bitmap_clear(bmp, schq);
		} else {
			/* Assign a contiguous queue */
			schq = *contig_id + child->priority;
			*contig_cnt = *contig_cnt - 1;
		}

		plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
			   nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
			   child->id, child);

		child->hw_id = schq;
		child->parent_hw_id = parent->hw_id;
		child->flags |= NIX_TM_NODE_HWRES;
	}

	return 0;
}
1082
/* Assign previously allocated HW schq resources to all nodes of @tree.
 *
 * Walks parents top-down (TL1 towards leaves); for each parent marked
 * dirty (child_realloc) it carves a contiguous run of (max_prio + 1)
 * schqs from the contiguous bitmap and hands ids to the children via
 * nix_tm_assign_hw_id(). Finally the root node (and, for VFs without
 * TL1 access, the parent TL1 id) is picked from the discontiguous
 * bitmaps.
 *
 * Returns 0 on success, -EFAULT on tree/resource inconsistency,
 * -ENOENT when contiguous schqs are missing, -EIO when the root
 * resource was never allocated.
 */
int
nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *parent, *root = NULL;
	struct plt_bitmap *bmp, *bmp_contig;
	struct nix_tm_node_list *list;
	uint8_t child_hw_lvl, hw_lvl;
	uint16_t contig_id, j;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int cnt, rc;

	list = nix_tm_node_list(nix, tree);
	/* Walk from TL1 to TL4 parents */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != hw_lvl)
				continue;

			/* Remember root for future */
			if (parent->hw_lvl == nix->tm_root_lvl)
				root = parent;

			if (!parent->child_realloc) {
				/* Skip when parent is not dirty */
				if (nix_tm_child_res_valid(list, parent))
					continue;
				plt_err("Parent not dirty but invalid "
					"child res parent id %u(lvl %u)",
					parent->id, parent->lvl);
				return -EFAULT;
			}

			bmp_contig = nix->schq_contig_bmp[child_hw_lvl];

			/* Prealloc contiguous indices for a parent */
			contig_id = NIX_TM_MAX_HW_TXSCHQ;
			cnt = (int)parent->max_prio + 1;
			if (cnt > 0) {
				plt_bitmap_scan_init(bmp_contig);
				if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
					plt_err("Contig schq not found");
					return -ENOENT;
				}
				/* First free contiguous id from scan */
				contig_id = pos + bitmap_ctzll(slab);

				/* Check if we have enough */
				for (j = contig_id; j < contig_id + cnt; j++) {
					if (!plt_bitmap_get(bmp_contig, j))
						break;
				}

				if (j != contig_id + cnt) {
					plt_err("Contig schq not sufficient");
					return -ENOENT;
				}

				/* Claim the whole contiguous run */
				for (j = contig_id; j < contig_id + cnt; j++)
					plt_bitmap_clear(bmp_contig, j);
			}

			/* Assign hw id to all children */
			rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
						 list);
			if (cnt || rc) {
				/* All preallocated ids must be consumed */
				plt_err("Unexpected err, contig res alloc, "
					"parent %u, of %s, rc=%d, cnt=%d",
					parent->id, nix_tm_hwlvl2str(hw_lvl),
					rc, cnt);
				return -EFAULT;
			}

			/* Clear the dirty bit as children's
			 * resources are reallocated.
			 */
			parent->child_realloc = false;
		}
	}

	/* Root is always expected to be there */
	if (!root)
		return -EFAULT;

	if (root->flags & NIX_TM_NODE_HWRES)
		return 0;

	/* Process root node */
	bmp = nix->schq_bmp[nix->tm_root_lvl];
	plt_bitmap_scan_init(bmp);
	if (!plt_bitmap_scan(bmp, &pos, &slab)) {
		plt_err("Resource not allocated for root");
		return -EIO;
	}

	root->hw_id = pos + bitmap_ctzll(slab);
	root->flags |= NIX_TM_NODE_HWRES;
	plt_bitmap_clear(bmp, root->hw_id);

	/* Get TL1 id as well when root is not TL1 */
	if (!nix_tm_have_tl1_access(nix)) {
		bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];

		plt_bitmap_scan_init(bmp);
		if (!plt_bitmap_scan(bmp, &pos, &slab)) {
			plt_err("Resource not found for TL1");
			return -EIO;
		}
		root->parent_hw_id = pos + bitmap_ctzll(slab);
		plt_bitmap_clear(bmp, root->parent_hw_id);
	}

	plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
		   nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);

	return 0;
}
1200
1201 void
1202 nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
1203 {
1204         uint8_t lvl;
1205         uint16_t i;
1206
1207         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1208                 for (i = 0; i < rsp->schq[lvl]; i++)
1209                         plt_bitmap_set(nix->schq_bmp[lvl],
1210                                        rsp->schq_list[lvl][i]);
1211
1212                 for (i = 0; i < rsp->schq_contig[lvl]; i++)
1213                         plt_bitmap_set(nix->schq_contig_bmp[lvl],
1214                                        rsp->schq_contig_list[lvl][i]);
1215         }
1216 }
1217
/* Allocate the HW schqs needed by @tree from AF.
 *
 * First estimates the per-level contiguous/discontiguous requirement,
 * releases existing contiguous resources (contiguity with new grants
 * cannot be guaranteed), then issues NIX_TXSCH_ALLOC mbox requests in
 * chunks of at most MAX_TXSCHQ_PER_FUNC per level until the full
 * requirement is satisfied, folding each response into the local
 * bitmaps. On any failure all resources of every level are released.
 *
 * Returns 0 on success (including when nothing is needed), -ENOMEM on
 * mbox message allocation failure, or the mbox error code.
 */
int
nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
{
	uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t schq[NIX_TXSCH_LVL_CNT];
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl, i;
	bool pend;
	int rc;

	memset(schq, 0, sizeof(schq));
	memset(schq_contig, 0, sizeof(schq_contig));

	/* Estimate requirement */
	rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
	if (!rc)
		return 0;

	/* Release existing contiguous resources when realloc requested
	 * as there is no way to guarantee continuity of old with new.
	 */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		if (schq_contig[hw_lvl])
			nix_tm_release_resources(nix, hw_lvl, true, false);
	}

	/* Alloc as needed */
	do {
		pend = false;
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (!req) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		mbox_memcpy(req->schq, schq, sizeof(req->schq));
		mbox_memcpy(req->schq_contig, schq_contig,
			    sizeof(req->schq_contig));

		/* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
		 * So split alloc to multiple requests.
		 */
		for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
			if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq[i] = MAX_TXSCHQ_PER_FUNC;
			schq[i] -= req->schq[i];

			if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
			schq_contig[i] -= req->schq_contig[i];

			/* Another round needed if anything remains */
			if (schq[i] || schq_contig[i])
				pend = true;
		}

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto alloc_err;

		/* Fold granted schqs into local bitmaps */
		nix_tm_copy_rsp_to_nix(nix, rsp);
	} while (pend);

	nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
	return 0;
alloc_err:
	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		if (nix_tm_release_resources(nix, i, true, false))
			plt_err("Failed to release contig resources of "
				"lvl %d on error",
				i);
		if (nix_tm_release_resources(nix, i, false, false))
			plt_err("Failed to release discontig resources of "
				"lvl %d on error",
				i);
	}
	return rc;
}
1296
1297 int
1298 nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
1299 {
1300         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1301         uint32_t nonleaf_id = nix->nb_tx_queues;
1302         struct nix_tm_node *node = NULL;
1303         uint8_t leaf_lvl, lvl, lvl_end;
1304         uint32_t parent, i;
1305         int rc = 0;
1306
1307         /* Add ROOT, SCH1, SCH2, SCH3, [SCH4]  nodes */
1308         parent = ROC_NIX_TM_NODE_ID_INVALID;
1309         /* With TL1 access we have an extra level */
1310         lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
1311                                                        ROC_TM_LVL_SCH3);
1312
1313         for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1314                 rc = -ENOMEM;
1315                 node = nix_tm_node_alloc();
1316                 if (!node)
1317                         goto error;
1318
1319                 node->id = nonleaf_id;
1320                 node->parent_id = parent;
1321                 node->priority = 0;
1322                 node->weight = NIX_TM_DFLT_RR_WT;
1323                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1324                 node->lvl = lvl;
1325                 node->tree = ROC_NIX_TM_DEFAULT;
1326                 node->rel_chan = NIX_TM_CHAN_INVALID;
1327
1328                 rc = nix_tm_node_add(roc_nix, node);
1329                 if (rc)
1330                         goto error;
1331                 parent = nonleaf_id;
1332                 nonleaf_id++;
1333         }
1334
1335         parent = nonleaf_id - 1;
1336         leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1337                                                         ROC_TM_LVL_SCH4);
1338
1339         /* Add leaf nodes */
1340         for (i = 0; i < nix->nb_tx_queues; i++) {
1341                 rc = -ENOMEM;
1342                 node = nix_tm_node_alloc();
1343                 if (!node)
1344                         goto error;
1345
1346                 node->id = i;
1347                 node->parent_id = parent;
1348                 node->priority = 0;
1349                 node->weight = NIX_TM_DFLT_RR_WT;
1350                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1351                 node->lvl = leaf_lvl;
1352                 node->tree = ROC_NIX_TM_DEFAULT;
1353                 node->rel_chan = NIX_TM_CHAN_INVALID;
1354
1355                 rc = nix_tm_node_add(roc_nix, node);
1356                 if (rc)
1357                         goto error;
1358         }
1359
1360         return 0;
1361 error:
1362         nix_tm_node_free(node);
1363         return rc;
1364 }
1365
1366 int
1367 roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
1368 {
1369         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1370         uint32_t nonleaf_id = nix->nb_tx_queues;
1371         struct nix_tm_node *node = NULL;
1372         uint8_t leaf_lvl, lvl, lvl_end;
1373         uint32_t parent, i;
1374         int rc = 0;
1375
1376         /* Add ROOT, SCH1, SCH2 nodes */
1377         parent = ROC_NIX_TM_NODE_ID_INVALID;
1378         lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
1379                                                        ROC_TM_LVL_SCH2);
1380
1381         for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1382                 rc = -ENOMEM;
1383                 node = nix_tm_node_alloc();
1384                 if (!node)
1385                         goto error;
1386
1387                 node->id = nonleaf_id;
1388                 node->parent_id = parent;
1389                 node->priority = 0;
1390                 node->weight = NIX_TM_DFLT_RR_WT;
1391                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1392                 node->lvl = lvl;
1393                 node->tree = ROC_NIX_TM_RLIMIT;
1394                 node->rel_chan = NIX_TM_CHAN_INVALID;
1395
1396                 rc = nix_tm_node_add(roc_nix, node);
1397                 if (rc)
1398                         goto error;
1399                 parent = nonleaf_id;
1400                 nonleaf_id++;
1401         }
1402
1403         /* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
1404         lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);
1405
1406         /* Add per queue SMQ nodes i.e SCH4 / SCH3 */
1407         for (i = 0; i < nix->nb_tx_queues; i++) {
1408                 rc = -ENOMEM;
1409                 node = nix_tm_node_alloc();
1410                 if (!node)
1411                         goto error;
1412
1413                 node->id = nonleaf_id + i;
1414                 node->parent_id = parent;
1415                 node->priority = 0;
1416                 node->weight = NIX_TM_DFLT_RR_WT;
1417                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1418                 node->lvl = lvl;
1419                 node->tree = ROC_NIX_TM_RLIMIT;
1420                 node->rel_chan = NIX_TM_CHAN_INVALID;
1421
1422                 rc = nix_tm_node_add(roc_nix, node);
1423                 if (rc)
1424                         goto error;
1425         }
1426
1427         parent = nonleaf_id;
1428         leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1429                                                         ROC_TM_LVL_SCH4);
1430
1431         /* Add leaf nodes */
1432         for (i = 0; i < nix->nb_tx_queues; i++) {
1433                 rc = -ENOMEM;
1434                 node = nix_tm_node_alloc();
1435                 if (!node)
1436                         goto error;
1437
1438                 node->id = i;
1439                 node->parent_id = parent + i;
1440                 node->priority = 0;
1441                 node->weight = NIX_TM_DFLT_RR_WT;
1442                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1443                 node->lvl = leaf_lvl;
1444                 node->tree = ROC_NIX_TM_RLIMIT;
1445                 node->rel_chan = NIX_TM_CHAN_INVALID;
1446
1447                 rc = nix_tm_node_add(roc_nix, node);
1448                 if (rc)
1449                         goto error;
1450         }
1451
1452         return 0;
1453 error:
1454         nix_tm_node_free(node);
1455         return rc;
1456 }
1457
1458 int
1459 roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
1460 {
1461         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1462         uint32_t nonleaf_id = nix->nb_tx_queues;
1463         struct nix_tm_node *node = NULL;
1464         uint8_t leaf_lvl, lvl, lvl_end;
1465         uint32_t tl2_node_id;
1466         uint32_t parent, i;
1467         int rc = -ENOMEM;
1468
1469         parent = ROC_NIX_TM_NODE_ID_INVALID;
1470         lvl_end = ROC_TM_LVL_SCH3;
1471         leaf_lvl = ROC_TM_LVL_QUEUE;
1472
1473         /* TL1 node */
1474         node = nix_tm_node_alloc();
1475         if (!node)
1476                 goto error;
1477
1478         node->id = nonleaf_id;
1479         node->parent_id = parent;
1480         node->priority = 0;
1481         node->weight = NIX_TM_DFLT_RR_WT;
1482         node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1483         node->lvl = ROC_TM_LVL_ROOT;
1484         node->tree = ROC_NIX_TM_PFC;
1485         node->rel_chan = NIX_TM_CHAN_INVALID;
1486
1487         rc = nix_tm_node_add(roc_nix, node);
1488         if (rc)
1489                 goto error;
1490
1491         parent = nonleaf_id;
1492         nonleaf_id++;
1493
1494         /* TL2 node */
1495         rc = -ENOMEM;
1496         node = nix_tm_node_alloc();
1497         if (!node)
1498                 goto error;
1499
1500         node->id = nonleaf_id;
1501         node->parent_id = parent;
1502         node->priority = 0;
1503         node->weight = NIX_TM_DFLT_RR_WT;
1504         node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1505         node->lvl = ROC_TM_LVL_SCH1;
1506         node->tree = ROC_NIX_TM_PFC;
1507         node->rel_chan = NIX_TM_CHAN_INVALID;
1508
1509         rc = nix_tm_node_add(roc_nix, node);
1510         if (rc)
1511                 goto error;
1512
1513         tl2_node_id = nonleaf_id;
1514         nonleaf_id++;
1515
1516         for (i = 0; i < nix->nb_tx_queues; i++) {
1517                 parent = tl2_node_id;
1518                 for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
1519                         rc = -ENOMEM;
1520                         node = nix_tm_node_alloc();
1521                         if (!node)
1522                                 goto error;
1523
1524                         node->id = nonleaf_id;
1525                         node->parent_id = parent;
1526                         node->priority = 0;
1527                         node->weight = NIX_TM_DFLT_RR_WT;
1528                         node->shaper_profile_id =
1529                                 ROC_NIX_TM_SHAPER_PROFILE_NONE;
1530                         node->lvl = lvl;
1531                         node->tree = ROC_NIX_TM_PFC;
1532                         node->rel_chan = NIX_TM_CHAN_INVALID;
1533
1534                         rc = nix_tm_node_add(roc_nix, node);
1535                         if (rc)
1536                                 goto error;
1537
1538                         parent = nonleaf_id;
1539                         nonleaf_id++;
1540                 }
1541
1542                 lvl = ROC_TM_LVL_SCH4;
1543
1544                 rc = -ENOMEM;
1545                 node = nix_tm_node_alloc();
1546                 if (!node)
1547                         goto error;
1548
1549                 node->id = nonleaf_id;
1550                 node->parent_id = parent;
1551                 node->priority = 0;
1552                 node->weight = NIX_TM_DFLT_RR_WT;
1553                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1554                 node->lvl = lvl;
1555                 node->tree = ROC_NIX_TM_PFC;
1556                 node->rel_chan = NIX_TM_CHAN_INVALID;
1557
1558                 rc = nix_tm_node_add(roc_nix, node);
1559                 if (rc)
1560                         goto error;
1561
1562                 parent = nonleaf_id;
1563                 nonleaf_id++;
1564
1565                 rc = -ENOMEM;
1566                 node = nix_tm_node_alloc();
1567                 if (!node)
1568                         goto error;
1569
1570                 node->id = i;
1571                 node->parent_id = parent;
1572                 node->priority = 0;
1573                 node->weight = NIX_TM_DFLT_RR_WT;
1574                 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1575                 node->lvl = leaf_lvl;
1576                 node->tree = ROC_NIX_TM_PFC;
1577                 node->rel_chan = NIX_TM_CHAN_INVALID;
1578
1579                 rc = nix_tm_node_add(roc_nix, node);
1580                 if (rc)
1581                         goto error;
1582         }
1583
1584         return 0;
1585 error:
1586         nix_tm_node_free(node);
1587         return rc;
1588 }
1589
1590 int
1591 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
1592 {
1593         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1594         struct nix_tm_shaper_profile *profile;
1595         struct nix_tm_node *node, *next_node;
1596         struct nix_tm_node_list *list;
1597         enum roc_nix_tm_tree tree;
1598         uint32_t profile_id;
1599         int rc = 0;
1600
1601         for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
1602                 if (!(tree_mask & BIT(tree)))
1603                         continue;
1604
1605                 plt_tm_dbg("Freeing resources of tree %u", tree);
1606
1607                 list = nix_tm_node_list(nix, tree);
1608                 next_node = TAILQ_FIRST(list);
1609                 while (next_node) {
1610                         node = next_node;
1611                         next_node = TAILQ_NEXT(node, node);
1612
1613                         if (!nix_tm_is_leaf(nix, node->lvl) &&
1614                             node->flags & NIX_TM_NODE_HWRES) {
1615                                 /* Clear xoff in path for flush to succeed */
1616                                 rc = nix_tm_clear_path_xoff(nix, node);
1617                                 if (rc)
1618                                         return rc;
1619                                 rc = nix_tm_free_node_resource(nix, node);
1620                                 if (rc)
1621                                         return rc;
1622                         }
1623                 }
1624
1625                 /* Leave software elements if needed */
1626                 if (hw_only)
1627                         continue;
1628
1629                 next_node = TAILQ_FIRST(list);
1630                 while (next_node) {
1631                         node = next_node;
1632                         next_node = TAILQ_NEXT(node, node);
1633
1634                         plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
1635                                    node->id, node);
1636
1637                         profile_id = node->shaper_profile_id;
1638                         profile = nix_tm_shaper_profile_search(nix, profile_id);
1639                         if (profile)
1640                                 profile->ref_cnt--;
1641
1642                         TAILQ_REMOVE(list, node, node);
1643                         nix_tm_node_free(node);
1644                 }
1645         }
1646         return rc;
1647 }
1648
/* One-time TM state initialization for a NIX LF.
 *
 * Initializes the per-tree node lists and shaper profile list, then
 * carves a single zeroed allocation into two bitmaps (discontiguous +
 * contiguous schq tracking) per HW scheduling level. Also decides TL1
 * access/static-priority policy: TL1 root for PFs, TL2 for VFs, and
 * TL1 static priority disabled when VFs may exist.
 *
 * Returns 0 on success, -ENOMEM when the bitmap memory cannot be
 * allocated, -EIO when bitmap init fails (state torn down via
 * nix_tm_conf_fini()).
 */
int
nix_tm_conf_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t bmp_sz, hw_lvl;
	void *bmp_mem;
	int rc, i;

	PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
	PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
			  ROC_NIX_TM_SHAPER_PROFILE_SZ);

	nix->tm_flags = 0;
	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
		TAILQ_INIT(&nix->trees[i]);

	TAILQ_INIT(&nix->shaper_profile_list);
	nix->tm_rate_min = 1E9; /* 1Gbps */

	/* One backing allocation for all bitmaps: 2 per HW level */
	rc = -ENOMEM;
	bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
	bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
	if (!bmp_mem)
		return rc;
	nix->schq_bmp_mem = bmp_mem;

	/* Init contiguous and discontiguous bitmap per lvl */
	rc = -EIO;
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		/* Bitmap for discontiguous resource */
		nix->schq_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);

		/* Bitmap for contiguous resource */
		nix->schq_contig_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_contig_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
	}

	/* Disable TL1 Static Priority when VF's are enabled
	 * as otherwise VF's TL2 reallocation will be needed
	 * runtime to support a specific topology of PF.
	 */
	if (nix->pci_dev->max_vfs)
		nix->tm_flags |= NIX_TM_TL1_NO_SP;

	/* TL1 access is only for PF's */
	if (roc_nix_is_pf(roc_nix)) {
		nix->tm_flags |= NIX_TM_TL1_ACCESS;
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
	} else {
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
	}

	return 0;
exit:
	nix_tm_conf_fini(roc_nix);
	return rc;
}
1715
1716 void
1717 nix_tm_conf_fini(struct roc_nix *roc_nix)
1718 {
1719         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1720         uint16_t hw_lvl;
1721
1722         for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1723                 plt_bitmap_free(nix->schq_bmp[hw_lvl]);
1724                 plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
1725         }
1726         plt_free(nix->schq_bmp_mem);
1727 }