common/cnxk: add NIX TM helper to alloc/free resource
[dpdk.git] / drivers / common / cnxk / roc_nix_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "roc_api.h"
6 #include "roc_priv.h"
7
/* Index of the lowest set bit in @slab; 0 when no bit is set
 * (callers treat an empty slab as position 0).
 */
static inline int
bitmap_ctzll(uint64_t slab)
{
	return (slab == 0) ? 0 : __builtin_ctzll(slab);
}
16
17 void
18 nix_tm_clear_shaper_profiles(struct nix *nix)
19 {
20         struct nix_tm_shaper_profile *shaper_profile;
21
22         shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
23         while (shaper_profile != NULL) {
24                 if (shaper_profile->ref_cnt)
25                         plt_warn("Shaper profile %u has non zero references",
26                                  shaper_profile->id);
27                 TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
28                 nix_tm_shaper_profile_free(shaper_profile);
29                 shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
30         }
31 }
32
/* Refresh per-parent scheduling info (RR group, RR priority, max priority)
 * for every node in @tree, releasing HW resources of children whose parent
 * hierarchy changed so they can be reallocated later.
 * Always returns 0.
 */
int
nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child, *parent;
	struct nix_tm_node_list *list;
	uint32_t rr_prio, max_prio;
	uint32_t rr_num = 0;

	list = nix_tm_node_list(nix, tree);

	/* Release all the node hw resources locally
	 * if parent marked as dirty and resource exists.
	 */
	TAILQ_FOREACH(child, list, node) {
		/* Release resource only if parent direct hierarchy changed */
		if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
		    child->parent->child_realloc) {
			nix_tm_free_node_resource(nix, child);
		}
		/* Reset; recomputed by nix_tm_check_rr() in the next pass */
		child->max_prio = UINT32_MAX;
	}

	/* Second pass: recompute RR/priority info for each node acting
	 * as a parent.
	 */
	TAILQ_FOREACH(parent, list, node) {
		/* Count group of children of same priority i.e are RR */
		rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
					 &max_prio);

		/* Assuming that multiple RR groups are
		 * not configured based on capability.
		 */
		parent->rr_prio = rr_prio;
		parent->rr_num = rr_num;
		parent->max_prio = max_prio;
	}

	return 0;
}
70
/* Validate and insert @node into its TM tree.
 *
 * Checks performed, in order: valid tree id, valid SW->HW level mapping,
 * leaf priority must be 0, parent must exist at the expected next HW level
 * (unless node is the root), node id must be unique in the tree, shaper
 * profile (if any) must exist and match the node's pkt mode for non-leaf
 * nodes, priorities among siblings must be hole-free with at most one DWRR
 * group, and weight must not exceed ROC_NIX_TM_MAX_SCHED_WT.
 *
 * On success the node is appended to the tree list with no HW resource
 * attached yet, the profile refcount is bumped, and the parent is marked
 * for child reallocation. Returns 0 or a NIX_ERR_* code.
 */
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	uint32_t node_id, parent_id, lvl;
	struct nix_tm_node *parent_node;
	uint32_t priority, profile_id;
	uint8_t hw_lvl, exp_next_lvl;
	enum roc_nix_tm_tree tree;
	int rc;

	node_id = node->id;
	priority = node->priority;
	parent_id = node->parent_id;
	profile_id = node->shaper_profile_id;
	lvl = node->lvl;
	tree = node->tree;

	plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u",
		   nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
		   priority, node->weight, parent_id, profile_id, tree);

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	/* Translate sw level id's to nix hw level id's */
	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
		return NIX_ERR_TM_INVALID_LVL;

	/* Leaf nodes have to be same priority */
	if (nix_tm_is_leaf(nix, lvl) && priority != 0)
		return NIX_ERR_TM_INVALID_PRIO;

	parent_node = nix_tm_node_search(nix, parent_id, tree);

	/* Node ids below nb_tx_queues are SQs, whose parent must be an SMQ */
	if (node_id < nix->nb_tx_queues)
		exp_next_lvl = NIX_TXSCH_LVL_SMQ;
	else
		exp_next_lvl = hw_lvl + 1;

	/* Check if there is no parent node yet */
	if (hw_lvl != nix->tm_root_lvl &&
	    (!parent_node || parent_node->hw_lvl != exp_next_lvl))
		return NIX_ERR_TM_INVALID_PARENT;

	/* Check if a node already exists */
	if (nix_tm_node_search(nix, node_id, tree))
		return NIX_ERR_TM_NODE_EXISTS;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	if (!nix_tm_is_leaf(nix, lvl)) {
		/* Check if shaper profile exists for non leaf node */
		if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

		/* Packet mode in profile should match with that of tm node */
		if (profile && profile->pkt_mode != node->pkt_mode)
			return NIX_ERR_TM_PKT_MODE_MISMATCH;
	}

	/* Check if there is second DWRR already in siblings or holes in prio */
	rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
	if (rc)
		return rc;

	if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
		return NIX_ERR_TM_WEIGHT_EXCEED;

	/* Maintain minimum weight */
	if (!node->weight)
		node->weight = 1;

	node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
	node->rr_prio = 0xF;
	node->max_prio = UINT32_MAX;
	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags = 0;

	if (profile)
		profile->ref_cnt++;

	node->parent = parent_node;
	/* Parent's children set changed; its HW children need realloc */
	if (parent_node)
		parent_node->child_realloc = true;
	node->parent_hw_id = NIX_TM_HW_ID_INVALID;

	TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
	plt_tm_dbg("Added node %s lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
	return 0;
}
165
166 int
167 nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
168 {
169         struct mbox *mbox = (&nix->dev)->mbox;
170         struct nix_txschq_config *req;
171         struct nix_tm_node *p;
172         int rc;
173
174         /* Enable nodes in path for flush to succeed */
175         if (!nix_tm_is_leaf(nix, node->lvl))
176                 p = node;
177         else
178                 p = node->parent;
179         while (p) {
180                 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
181                     (p->flags & NIX_TM_NODE_HWRES)) {
182                         req = mbox_alloc_msg_nix_txschq_cfg(mbox);
183                         req->lvl = p->hw_lvl;
184                         req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
185                                                             req->regval);
186                         rc = mbox_process(mbox);
187                         if (rc)
188                                 return rc;
189
190                         p->flags |= NIX_TM_NODE_ENABLED;
191                 }
192                 p = p->parent;
193         }
194
195         return 0;
196 }
197
198 int
199 nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
200 {
201         struct mbox *mbox = (&nix->dev)->mbox;
202         struct nix_txschq_config *req;
203         uint16_t smq;
204         int rc;
205
206         smq = node->hw_id;
207         plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
208                    enable ? "enable" : "disable");
209
210         rc = nix_tm_clear_path_xoff(nix, node);
211         if (rc)
212                 return rc;
213
214         req = mbox_alloc_msg_nix_txschq_cfg(mbox);
215         req->lvl = NIX_TXSCH_LVL_SMQ;
216         req->num_regs = 1;
217
218         req->reg[0] = NIX_AF_SMQX_CFG(smq);
219         req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
220         req->regval_mask[0] =
221                 enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
222
223         return mbox_process(mbox);
224 }
225
226 int
227 nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
228                      uint16_t *smq)
229 {
230         struct nix_tm_node *node;
231         int rc;
232
233         node = nix_tm_node_search(nix, sq, nix->tm_tree);
234
235         /* Check if we found a valid leaf node */
236         if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
237             node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
238                 return -EIO;
239         }
240
241         /* Get SMQ Id of leaf node's parent */
242         *smq = node->parent->hw_id;
243         *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
244
245         rc = nix_tm_smq_xoff(nix, node->parent, false);
246         if (rc)
247                 return rc;
248         node->flags |= NIX_TM_NODE_ENABLED;
249         return 0;
250 }
251
/* Spin until SQ @sq reaches quiescent state (no buffered packets and all
 * SQB buffers returned), or until a timeout derived from the minimum
 * shaper rate expires.
 * Returns 0 when drained, -EFAULT on timeout (after dumping queue ctx).
 */
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint64_t wdata, val, prev;
	uint16_t qid = sq->qid;
	int64_t *regaddr;
	uint64_t timeout; /* 10's of usec */

	/* Wait for enough time based on shaper min rate */
	timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
	/* Wait for worst case scenario of this SQ being last priority
	 * and so have to wait for all other SQ's drain out by their own.
	 */
	timeout = timeout * nix->nb_tx_queues;
	timeout = timeout / nix->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	/* Atomic read of NIX_LF_SQ_OP_STATUS for this qid */
	wdata = ((uint64_t)qid << 32);
	regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(wdata, regaddr);

	/* Spin multiple iterations as "sq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled
	 */

	while (true) {
		prev = val;
		plt_delay_us(10);
		val = roc_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		/* Require two consecutive identical reads before trusting
		 * the status value.
		 */
		if (prev != val)
			continue;

		/* NOTE(review): field extraction assumes SQ_OP_STATUS layout
		 * sqb_cnt[15:0], head_off[25:20], tail_off[33:28] — confirm
		 * against HRM.
		 */
		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
			break;
		}

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	roc_nix_queues_ctx_dump(sq->roc_nix);
	return -EFAULT;
}
312
313 /* Flush and disable tx queue and its parent SMQ */
314 int
315 nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
316 {
317         struct roc_nix *roc_nix = sq->roc_nix;
318         struct nix_tm_node *node, *sibling;
319         struct nix_tm_node_list *list;
320         enum roc_nix_tm_tree tree;
321         struct mbox *mbox;
322         struct nix *nix;
323         uint16_t qid;
324         int rc;
325
326         nix = roc_nix_to_nix_priv(roc_nix);
327
328         /* Need not do anything if tree is in disabled state */
329         if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
330                 return 0;
331
332         mbox = (&nix->dev)->mbox;
333         qid = sq->qid;
334
335         tree = nix->tm_tree;
336         list = nix_tm_node_list(nix, tree);
337
338         /* Find the node for this SQ */
339         node = nix_tm_node_search(nix, qid, tree);
340         if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
341                 plt_err("Invalid node/state for sq %u", qid);
342                 return -EFAULT;
343         }
344
345         /* Enable CGX RXTX to drain pkts */
346         if (!roc_nix->io_enabled) {
347                 /* Though it enables both RX MCAM Entries and CGX Link
348                  * we assume all the rx queues are stopped way back.
349                  */
350                 mbox_alloc_msg_nix_lf_start_rx(mbox);
351                 rc = mbox_process(mbox);
352                 if (rc) {
353                         plt_err("cgx start failed, rc=%d", rc);
354                         return rc;
355                 }
356         }
357
358         /* Disable smq xoff for case it was enabled earlier */
359         rc = nix_tm_smq_xoff(nix, node->parent, false);
360         if (rc) {
361                 plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
362                         rc);
363                 return rc;
364         }
365
366         /* As per HRM, to disable an SQ, all other SQ's
367          * that feed to same SMQ must be paused before SMQ flush.
368          */
369         TAILQ_FOREACH(sibling, list, node) {
370                 if (sibling->parent != node->parent)
371                         continue;
372                 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
373                         continue;
374
375                 qid = sibling->id;
376                 sq = nix->sqs[qid];
377                 if (!sq)
378                         continue;
379
380                 rc = roc_nix_tm_sq_aura_fc(sq, false);
381                 if (rc) {
382                         plt_err("Failed to disable sqb aura fc, rc=%d", rc);
383                         goto cleanup;
384                 }
385
386                 /* Wait for sq entries to be flushed */
387                 rc = roc_nix_tm_sq_flush_spin(sq);
388                 if (rc) {
389                         plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
390                         return rc;
391                 }
392         }
393
394         node->flags &= ~NIX_TM_NODE_ENABLED;
395
396         /* Disable and flush */
397         rc = nix_tm_smq_xoff(nix, node->parent, true);
398         if (rc) {
399                 plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
400                         rc);
401                 goto cleanup;
402         }
403 cleanup:
404         /* Restore cgx state */
405         if (!roc_nix->io_enabled) {
406                 mbox_alloc_msg_nix_lf_stop_rx(mbox);
407                 rc |= mbox_process(mbox);
408         }
409
410         return rc;
411 }
412
/* Post-flush recovery for SQ @sq: re-enable the parent SMQ (once, and only
 * if at least one sibling SQ is still active) and restore SQB aura flow
 * control on each enabled sibling.
 * Returns 0 on success or a negative error.
 */
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct roc_nix_sq *s_sq;
	bool once = false;
	uint16_t qid, s_qid;
	struct nix *nix;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Need not do anything if tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	qid = sq->qid;
	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node) {
		plt_err("Invalid node for sq %u", qid);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;

		/* Skip the SQ that was just flushed */
		if (sibling->id == qid)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_qid = sibling->id;
		s_sq = nix->sqs[s_qid];
		if (!s_sq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_tm_smq_xoff(nix, node->parent, false);
			if (rc) {
				plt_err("Failed to enable smq %u, rc=%d",
					node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = roc_nix_tm_sq_aura_fc(s_sq, true);
		if (rc) {
			plt_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	return 0;
}
479
/* Release txschq HW resources of level @hw_lvl back to AF, from either the
 * contiguous or discontiguous bitmap. When @above_thresh is set, only the
 * surplus above the reserved threshold is released; otherwise everything is.
 * Returns 0 on success, -ENOSPC on mbox alloc failure, mbox error on AF
 * failure, or -EFAULT when the bitmap runs out before the expected count.
 */
int
nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
			 bool above_thresh)
{
	uint16_t avail, thresh, to_free = 0, schq;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int rc = -ENOSPC;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	thresh =
		contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
	plt_bitmap_scan_init(bmp);

	avail = nix_tm_resource_avail(nix, hw_lvl, contig);

	if (above_thresh) {
		/* Release only above threshold */
		if (avail > thresh)
			to_free = avail - thresh;
	} else {
		/* Release everything */
		to_free = avail;
	}

	/* Now release resources to AF */
	while (to_free) {
		/* Refill slab from bitmap scan when exhausted */
		if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
			break;

		/* Pop lowest set bit; schq = slab bit index + scan position */
		schq = bitmap_ctzll(slab);
		slab &= ~(1ULL << schq);
		schq += pos;

		/* Free to AF */
		req = mbox_alloc_msg_nix_txsch_free(mbox);
		if (req == NULL)
			return rc;
		req->flags = 0;
		req->schq_lvl = hw_lvl;
		req->schq = schq;
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("failed to release hwres %s(%u) rc %d",
				nix_tm_hwlvl2str(hw_lvl), schq, rc);
			return rc;
		}

		plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
			   schq);
		plt_bitmap_clear(bmp, schq);
		to_free--;
	}

	/* Bitmap exhausted before the available count was reached */
	if (to_free) {
		plt_err("resource inconsistency for %s(%u)",
			nix_tm_hwlvl2str(hw_lvl), contig);
		return -EFAULT;
	}
	return 0;
}
544
/* Free the HW txschq held by @node: returned to the local discontiguous
 * pool when the reserved pool is under-filled, otherwise released to AF
 * via mbox. Marks the parent dirty so children get reallocated.
 * Returns 0 on success, -ENOSPC on mbox alloc failure, or an mbox error.
 */
int
nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint16_t avail, hw_id;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = node->hw_lvl;
	hw_id = node->hw_id;
	bmp = nix->schq_bmp[hw_lvl];
	/* Free specific HW resource */
	plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
		   node);

	avail = nix_tm_resource_avail(nix, hw_lvl, false);
	/* Always for now free to discontiguous queue when avail
	 * is not sufficient.
	 */
	if (nix->discontig_rsvd[hw_lvl] &&
	    avail < nix->discontig_rsvd[hw_lvl]) {
		PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
		PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
		/* Keep the schq locally: mark it free in our bitmap */
		plt_bitmap_set(bmp, hw_id);
		node->hw_id = NIX_TM_HW_ID_INVALID;
		node->flags &= ~NIX_TM_NODE_HWRES;
		return 0;
	}

	/* Free to AF */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return rc;
	req->flags = 0;
	req->schq_lvl = node->hw_lvl;
	req->schq = hw_id;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("failed to release hwres %s(%u) rc %d",
			nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
		return rc;
	}

	/* Mark parent as dirty for reallocing it's children */
	if (node->parent)
		node->parent->child_realloc = true;

	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags &= ~NIX_TM_NODE_HWRES;
	plt_tm_dbg("Released hwres %s(%u) to af",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id);
	return 0;
}
601
/* Delete node @node_id from @tree. Fails if the node has children.
 * Releases any HW resource held, drops the shaper profile reference,
 * unlinks the node, and frees it only when @free is set (caller may own
 * the node memory otherwise).
 * Returns 0 or a NIX_ERR_* / negative error code.
 */
int
nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
		   enum roc_nix_tm_tree tree, bool free)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_node *node, *child;
	struct nix_tm_node_list *list;
	uint32_t profile_id;
	int rc;

	plt_tm_dbg("Delete node id %u tree %u", node_id, tree);

	node = nix_tm_node_search(nix, node_id, tree);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	list = nix_tm_node_list(nix, tree);
	/* Check for any existing children */
	TAILQ_FOREACH(child, list, node) {
		if (child->parent == node)
			return NIX_ERR_TM_CHILD_EXISTS;
	}

	/* Remove shaper profile reference */
	profile_id = node->shaper_profile_id;
	profile = nix_tm_shaper_profile_search(nix, profile_id);

	/* Free hw resource locally */
	if (node->flags & NIX_TM_NODE_HWRES) {
		rc = nix_tm_free_node_resource(nix, node);
		if (rc)
			return rc;
	}

	if (profile)
		profile->ref_cnt--;

	TAILQ_REMOVE(list, node, node);

	plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
		   node->priority, node->weight,
		   node->parent ? node->parent->id : UINT32_MAX,
		   node->shaper_profile_id, tree, node);
	/* Free only if requested */
	if (free)
		nix_tm_node_free(node);
	return 0;
}
653
654 static int
655 nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
656                     uint16_t *contig_id, int *contig_cnt,
657                     struct nix_tm_node_list *list)
658 {
659         struct nix_tm_node *child;
660         struct plt_bitmap *bmp;
661         uint8_t child_hw_lvl;
662         int spare_schq = -1;
663         uint32_t pos = 0;
664         uint64_t slab;
665         uint16_t schq;
666
667         child_hw_lvl = parent->hw_lvl - 1;
668         bmp = nix->schq_bmp[child_hw_lvl];
669         plt_bitmap_scan_init(bmp);
670         slab = 0;
671
672         /* Save spare schq if it is case of RR + SP */
673         if (parent->rr_prio != 0xf && *contig_cnt > 1)
674                 spare_schq = *contig_id + parent->rr_prio;
675
676         TAILQ_FOREACH(child, list, node) {
677                 if (!child->parent)
678                         continue;
679                 if (child->parent->id != parent->id)
680                         continue;
681
682                 /* Resource never expected to be present */
683                 if (child->flags & NIX_TM_NODE_HWRES) {
684                         plt_err("Resource exists for child (%s)%u, id %u (%p)",
685                                 nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
686                                 child->id, child);
687                         return -EFAULT;
688                 }
689
690                 if (!slab)
691                         plt_bitmap_scan(bmp, &pos, &slab);
692
693                 if (child->priority == parent->rr_prio && spare_schq != -1) {
694                         /* Use spare schq first if present */
695                         schq = spare_schq;
696                         spare_schq = -1;
697                         *contig_cnt = *contig_cnt - 1;
698
699                 } else if (child->priority == parent->rr_prio) {
700                         /* Assign a discontiguous queue */
701                         if (!slab) {
702                                 plt_err("Schq not found for Child %u "
703                                         "lvl %u (%p)",
704                                         child->id, child->lvl, child);
705                                 return -ENOENT;
706                         }
707
708                         schq = bitmap_ctzll(slab);
709                         slab &= ~(1ULL << schq);
710                         schq += pos;
711                         plt_bitmap_clear(bmp, schq);
712                 } else {
713                         /* Assign a contiguous queue */
714                         schq = *contig_id + child->priority;
715                         *contig_cnt = *contig_cnt - 1;
716                 }
717
718                 plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
719                            nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
720                            child->id, child);
721
722                 child->hw_id = schq;
723                 child->parent_hw_id = parent->hw_id;
724                 child->flags |= NIX_TM_NODE_HWRES;
725         }
726
727         return 0;
728 }
729
/* Distribute allocated schq HW resources over all nodes of @tree.
 *
 * Walks parent levels top-down (TL1 toward SMQ); for each dirty parent it
 * reserves a run of contiguous child ids sized by max_prio+1, then lets
 * nix_tm_assign_hw_id() hand them (plus discontiguous ones for RR) to the
 * children. Finally assigns the root's own schq, and a TL1 id for the
 * root's parent when the PF/VF lacks TL1 access.
 * Returns 0 or a negative error (-EFAULT/-ENOENT/-EIO).
 */
int
nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *parent, *root = NULL;
	struct plt_bitmap *bmp, *bmp_contig;
	struct nix_tm_node_list *list;
	uint8_t child_hw_lvl, hw_lvl;
	uint16_t contig_id, j;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int cnt, rc;

	list = nix_tm_node_list(nix, tree);
	/* Walk from TL1 to TL4 parents */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != hw_lvl)
				continue;

			/* Remember root for future */
			if (parent->hw_lvl == nix->tm_root_lvl)
				root = parent;

			if (!parent->child_realloc) {
				/* Skip when parent is not dirty */
				if (nix_tm_child_res_valid(list, parent))
					continue;
				plt_err("Parent not dirty but invalid "
					"child res parent id %u(lvl %u)",
					parent->id, parent->lvl);
				return -EFAULT;
			}

			bmp_contig = nix->schq_contig_bmp[child_hw_lvl];

			/* Prealloc contiguous indices for a parent */
			contig_id = NIX_TM_MAX_HW_TXSCHQ;
			cnt = (int)parent->max_prio + 1;
			if (cnt > 0) {
				plt_bitmap_scan_init(bmp_contig);
				if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
					plt_err("Contig schq not found");
					return -ENOENT;
				}
				/* First free contiguous id in the pool */
				contig_id = pos + bitmap_ctzll(slab);

				/* Check if we have enough */
				for (j = contig_id; j < contig_id + cnt; j++) {
					if (!plt_bitmap_get(bmp_contig, j))
						break;
				}

				if (j != contig_id + cnt) {
					plt_err("Contig schq not sufficient");
					return -ENOENT;
				}

				/* Claim the run from the contiguous pool */
				for (j = contig_id; j < contig_id + cnt; j++)
					plt_bitmap_clear(bmp_contig, j);
			}

			/* Assign hw id to all children */
			rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
						 list);
			/* All reserved contiguous ids must be consumed */
			if (cnt || rc) {
				plt_err("Unexpected err, contig res alloc, "
					"parent %u, of %s, rc=%d, cnt=%d",
					parent->id, nix_tm_hwlvl2str(hw_lvl),
					rc, cnt);
				return -EFAULT;
			}

			/* Clear the dirty bit as children's
			 * resources are reallocated.
			 */
			parent->child_realloc = false;
		}
	}

	/* Root is always expected to be there */
	if (!root)
		return -EFAULT;

	if (root->flags & NIX_TM_NODE_HWRES)
		return 0;

	/* Process root node */
	bmp = nix->schq_bmp[nix->tm_root_lvl];
	plt_bitmap_scan_init(bmp);
	if (!plt_bitmap_scan(bmp, &pos, &slab)) {
		plt_err("Resource not allocated for root");
		return -EIO;
	}

	root->hw_id = pos + bitmap_ctzll(slab);
	root->flags |= NIX_TM_NODE_HWRES;
	plt_bitmap_clear(bmp, root->hw_id);

	/* Get TL1 id as well when root is not TL1 */
	if (!nix_tm_have_tl1_access(nix)) {
		bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];

		plt_bitmap_scan_init(bmp);
		if (!plt_bitmap_scan(bmp, &pos, &slab)) {
			plt_err("Resource not found for TL1");
			return -EIO;
		}
		root->parent_hw_id = pos + bitmap_ctzll(slab);
		plt_bitmap_clear(bmp, root->parent_hw_id);
	}

	plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
		   nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);

	return 0;
}
847
848 void
849 nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
850 {
851         uint8_t lvl;
852         uint16_t i;
853
854         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
855                 for (i = 0; i < rsp->schq[lvl]; i++)
856                         plt_bitmap_set(nix->schq_bmp[lvl],
857                                        rsp->schq_list[lvl][i]);
858
859                 for (i = 0; i < rsp->schq_contig[lvl]; i++)
860                         plt_bitmap_set(nix->schq_contig_bmp[lvl],
861                                        rsp->schq_contig_list[lvl][i]);
862         }
863 }
864
/* Allocate HW txschq resources for the given TM tree via AF mailbox.
 *
 * Estimates the per-level contiguous/discontiguous queue requirement,
 * releases stale contiguous resources (continuity with a fresh grant
 * cannot be guaranteed), then issues one or more NIX_TXSCH_ALLOC mbox
 * requests until the full requirement is satisfied. Granted queue ids
 * are recorded in the nix bitmaps. Returns 0 on success or a negative
 * errno; on failure all resources grabbed so far are released.
 */
int
nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
{
	uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t schq[NIX_TXSCH_LVL_CNT];
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl, i;
	bool pend;
	int rc;

	memset(schq, 0, sizeof(schq));
	memset(schq_contig, 0, sizeof(schq_contig));

	/* Estimate requirement */
	rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
	if (!rc)
		return 0;

	/* Release existing contiguous resources when realloc requested
	 * as there is no way to guarantee continuity of old with new.
	 */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		if (schq_contig[hw_lvl])
			nix_tm_release_resources(nix, hw_lvl, true, false);
	}

	/* Alloc as needed */
	do {
		pend = false;
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (!req) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		/* Request the full outstanding requirement; it is trimmed
		 * to the per-request limit just below.
		 */
		mbox_memcpy(req->schq, schq, sizeof(req->schq));
		mbox_memcpy(req->schq_contig, schq_contig,
			    sizeof(req->schq_contig));

		/* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
		 * So split alloc to multiple requests.
		 */
		for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
			if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq[i] = MAX_TXSCHQ_PER_FUNC;
			schq[i] -= req->schq[i];

			if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
			schq_contig[i] -= req->schq_contig[i];

			/* Anything still outstanding forces another round */
			if (schq[i] || schq_contig[i])
				pend = true;
		}

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto alloc_err;

		/* Record granted queue ids in the nix bitmaps */
		nix_tm_copy_rsp_to_nix(nix, rsp);
	} while (pend);

	/* rsp is valid here: the loop runs at least once and any mbox
	 * failure jumps to alloc_err before reaching this point.
	 */
	nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
	return 0;
alloc_err:
	/* Best-effort rollback of everything granted so far */
	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		if (nix_tm_release_resources(nix, i, true, false))
			plt_err("Failed to release contig resources of "
				"lvl %d on error",
				i);
		if (nix_tm_release_resources(nix, i, false, false))
			plt_err("Failed to release discontig resources of "
				"lvl %d on error",
				i);
	}
	return rc;
}
943
944 int
945 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
946 {
947         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
948         struct nix_tm_shaper_profile *profile;
949         struct nix_tm_node *node, *next_node;
950         struct nix_tm_node_list *list;
951         enum roc_nix_tm_tree tree;
952         uint32_t profile_id;
953         int rc = 0;
954
955         for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
956                 if (!(tree_mask & BIT(tree)))
957                         continue;
958
959                 plt_tm_dbg("Freeing resources of tree %u", tree);
960
961                 list = nix_tm_node_list(nix, tree);
962                 next_node = TAILQ_FIRST(list);
963                 while (next_node) {
964                         node = next_node;
965                         next_node = TAILQ_NEXT(node, node);
966
967                         if (!nix_tm_is_leaf(nix, node->lvl) &&
968                             node->flags & NIX_TM_NODE_HWRES) {
969                                 /* Clear xoff in path for flush to succeed */
970                                 rc = nix_tm_clear_path_xoff(nix, node);
971                                 if (rc)
972                                         return rc;
973                                 rc = nix_tm_free_node_resource(nix, node);
974                                 if (rc)
975                                         return rc;
976                         }
977                 }
978
979                 /* Leave software elements if needed */
980                 if (hw_only)
981                         continue;
982
983                 next_node = TAILQ_FIRST(list);
984                 while (next_node) {
985                         node = next_node;
986                         next_node = TAILQ_NEXT(node, node);
987
988                         plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
989                                    node->id, node);
990
991                         profile_id = node->shaper_profile_id;
992                         profile = nix_tm_shaper_profile_search(nix, profile_id);
993                         if (profile)
994                                 profile->ref_cnt--;
995
996                         TAILQ_REMOVE(list, node, node);
997                         nix_tm_node_free(node);
998                 }
999         }
1000         return rc;
1001 }
1002
/* One-time init of TM state in the nix private struct: node/tree lists,
 * shaper profile list, and the per-level schq bitmaps (one contiguous,
 * one discontiguous per HW level, all carved from a single allocation).
 * Returns 0 on success, -ENOMEM/-EIO on failure (partial state is torn
 * down via nix_tm_conf_fini()).
 */
int
nix_tm_conf_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t bmp_sz, hw_lvl;
	void *bmp_mem;
	int rc, i;

	/* Opaque roc-layer storage must be able to hold the private types */
	PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
	PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
			  ROC_NIX_TM_SHAPER_PROFILE_SZ);

	nix->tm_flags = 0;
	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
		TAILQ_INIT(&nix->trees[i]);

	TAILQ_INIT(&nix->shaper_profile_list);
	nix->tm_rate_min = 1E9; /* 1Gbps */

	/* Single allocation backing two bitmaps (contig + discontig)
	 * for every HW scheduler level.
	 */
	rc = -ENOMEM;
	bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
	bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
	if (!bmp_mem)
		return rc;
	nix->schq_bmp_mem = bmp_mem;

	/* Init contiguous and discontiguous bitmap per lvl */
	rc = -EIO;
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		/* Bitmap for discontiguous resource */
		nix->schq_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);

		/* Bitmap for contiguous resource */
		nix->schq_contig_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_contig_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
	}

	/* Disable TL1 Static Priority when VF's are enabled
	 * as otherwise VF's TL2 reallocation will be needed
	 * runtime to support a specific topology of PF.
	 */
	if (nix->pci_dev->max_vfs)
		nix->tm_flags |= NIX_TM_TL1_NO_SP;

	/* TL1 access is only for PF's */
	if (roc_nix_is_pf(roc_nix)) {
		nix->tm_flags |= NIX_TM_TL1_ACCESS;
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
	} else {
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
	}

	return 0;
exit:
	/* Tear down whatever was set up before the failure */
	nix_tm_conf_fini(roc_nix);
	return rc;
}
1069
1070 void
1071 nix_tm_conf_fini(struct roc_nix *roc_nix)
1072 {
1073         struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1074         uint16_t hw_lvl;
1075
1076         for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1077                 plt_bitmap_free(nix->schq_bmp[hw_lvl]);
1078                 plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
1079         }
1080         plt_free(nix->schq_bmp_mem);
1081 }