common/cnxk: add NIX TM hierarchy enable/disable
drivers/common/cnxk/roc_nix_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

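/* Index of the least significant set bit in a bitmap slab. Callers pair
 * this with plt_bitmap_scan() and pass a non-zero slab; a zero slab
 * yields 0 here rather than an error.
 */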
static inline int
bitmap_ctzll(uint64_t slab)
{
	if (slab == 0)
		return 0;

	return __builtin_ctzll(slab);
}

void
nix_tm_clear_shaper_profiles(struct nix *nix)
{
	struct nix_tm_shaper_profile *shaper_profile;

	shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
	while (shaper_profile != NULL) {
		if (shaper_profile->ref_cnt)
			plt_warn("Shaper profile %u has non-zero references",
				 shaper_profile->id);
		TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
		nix_tm_shaper_profile_free(shaper_profile);
		shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
	}
}

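/* Program a single TM node into HW: topology (parent linkage), schedule
 * (priority/weight) and shaping (rate/burst) registers are batched into
 * one NIX_TXSCHQ_CFG mailbox message. Bits cleared in regval_mask select
 * which bits of each register the AF updates from regval.
 */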
static int
nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
{
	uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
	uint64_t regval[MAX_REGS_PER_MBOX_MSG];
	struct nix_tm_shaper_profile *profile;
	uint64_t reg[MAX_REGS_PER_MBOX_MSG];
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	int rc = -EFAULT;
	uint32_t hw_lvl;
	uint8_t k = 0;

	memset(regval, 0, sizeof(regval));
	memset(regval_mask, 0, sizeof(regval_mask));

	profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
	hw_lvl = node->hw_lvl;

	/* Need this trigger to configure TL1 */
	if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
		/* Prepare default conf for TL1 */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = NIX_TXSCH_LVL_TL1;

		k = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,
					    req->regval);
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			goto error;
	}

	/* Prepare topology config */
	k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);

	/* Prepare schedule config */
	k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);

	/* Prepare shaping config */
	k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);

	if (!k)
		return 0;

	/* Copy and send config mbox */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = hw_lvl;
	req->num_regs = k;

	mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
	mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
	mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

	rc = mbox_process(mbox);
	if (rc)
		goto error;

	return 0;
error:
	plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
	return rc;
}

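/* Push register config for every node of a tree, walking HW levels
 * bottom-up (SMQ first, root level last) so children are programmed
 * before their parents.
 */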
int
nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t hw_lvl;
	int rc = 0;

	list = nix_tm_node_list(nix, tree);

	for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
		TAILQ_FOREACH(node, list, node) {
			if (node->hw_lvl != hw_lvl)
				continue;
			rc = nix_tm_node_reg_conf(nix, node);
			if (rc)
				goto exit;
		}
	}
exit:
	return rc;
}

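/* Refresh per-parent scheduling info (RR group size/priority, max prio)
 * after a topology change, releasing children's HW resources locally
 * when their parent is marked for child reallocation.
 */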
int
nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child, *parent;
	struct nix_tm_node_list *list;
	uint32_t rr_prio, max_prio;
	uint32_t rr_num = 0;

	list = nix_tm_node_list(nix, tree);

	/* Release all node HW resources locally
	 * if the parent is marked dirty and a resource exists.
	 */
	TAILQ_FOREACH(child, list, node) {
		/* Release resource only if parent's direct hierarchy changed */
		if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
		    child->parent->child_realloc) {
			nix_tm_free_node_resource(nix, child);
		}
		child->max_prio = UINT32_MAX;
	}

	TAILQ_FOREACH(parent, list, node) {
		/* Count the group of children with the same priority,
		 * i.e., the RR group.
		 */
		rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
					 &max_prio);

		/* Assume multiple RR groups are not configured,
		 * based on capability.
		 */
		parent->rr_prio = rr_prio;
		parent->rr_num = rr_num;
		parent->max_prio = max_prio;
	}

	return 0;
}

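/* Validate a new node against its tree and parent, and add it to the SW
 * hierarchy only; HW resources are assigned later when the hierarchy is
 * committed to HW.
 */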
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	uint32_t node_id, parent_id, lvl;
	struct nix_tm_node *parent_node;
	uint32_t priority, profile_id;
	uint8_t hw_lvl, exp_next_lvl;
	enum roc_nix_tm_tree tree;
	int rc;

	node_id = node->id;
	priority = node->priority;
	parent_id = node->parent_id;
	profile_id = node->shaper_profile_id;
	lvl = node->lvl;
	tree = node->tree;

	plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u",
		   nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
		   priority, node->weight, parent_id, profile_id, tree);

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	/* Translate SW level IDs to NIX HW level IDs */
	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
		return NIX_ERR_TM_INVALID_LVL;

	/* Leaf nodes must all have the same priority */
	if (nix_tm_is_leaf(nix, lvl) && priority != 0)
		return NIX_ERR_TM_INVALID_PRIO;

	parent_node = nix_tm_node_search(nix, parent_id, tree);

	if (node_id < nix->nb_tx_queues)
		exp_next_lvl = NIX_TXSCH_LVL_SMQ;
	else
		exp_next_lvl = hw_lvl + 1;

	/* Check if there is no parent node yet */
	if (hw_lvl != nix->tm_root_lvl &&
	    (!parent_node || parent_node->hw_lvl != exp_next_lvl))
		return NIX_ERR_TM_INVALID_PARENT;

	/* Check if a node already exists */
	if (nix_tm_node_search(nix, node_id, tree))
		return NIX_ERR_TM_NODE_EXISTS;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	if (!nix_tm_is_leaf(nix, lvl)) {
		/* Check if shaper profile exists for non-leaf node */
		if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

		/* Packet mode in the profile should match that of the TM node */
		if (profile && profile->pkt_mode != node->pkt_mode)
			return NIX_ERR_TM_PKT_MODE_MISMATCH;
	}

	/* Check for a second DWRR group among siblings or holes in priority */
	rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
	if (rc)
		return rc;

	if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
		return NIX_ERR_TM_WEIGHT_EXCEED;

	/* Maintain minimum weight */
	if (!node->weight)
		node->weight = 1;

	node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
	node->rr_prio = 0xF;
	node->max_prio = UINT32_MAX;
	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags = 0;

	if (profile)
		profile->ref_cnt++;

	node->parent = parent_node;
	if (parent_node)
		parent_node->child_realloc = true;
	node->parent_hw_id = NIX_TM_HW_ID_INVALID;

	TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
	plt_tm_dbg("Added node %s lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
	return 0;
}

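/* An SMQ flush completes only if every scheduler queue on the path to
 * the root can transmit; clear SW XOFF on any disabled ancestor that
 * still holds a HW resource.
 */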
int
nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *p;
	int rc;

	/* Enable nodes in path for flush to succeed */
	if (!nix_tm_is_leaf(nix, node->lvl))
		p = node;
	else
		p = node->parent;
	while (p) {
		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
		    (p->flags & NIX_TM_NODE_HWRES)) {
			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
			req->lvl = p->hw_lvl;
			req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
							    req->regval);
			rc = mbox_process(mbox);
			if (rc)
				return rc;

			p->flags |= NIX_TM_NODE_ENABLED;
		}
		p = p->parent;
	}

	return 0;
}

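/* Gate (XOFF) or open an SMQ via NIX_AF_SMQ(x)_CFG. Enabling sets the
 * SMQ flush and enqueue-XOFF bits so in-flight packets drain out;
 * disabling clears the gate so the SMQ can transmit again. regval_mask
 * restricts the update to these bits.
 */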
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	uint16_t smq;
	int rc;

	smq = node->hw_id;
	plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
		   enable ? "enable" : "disable");

	rc = nix_tm_clear_path_xoff(nix, node);
	if (rc)
		return rc;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
	req->regval_mask[0] =
		enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

	return mbox_process(mbox);
}

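/* Get the SMQ and RR quantum an SQ (leaf) context must be programmed
 * with, and re-open the parent SMQ in case an earlier flush left it
 * gated.
 */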
int
nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
		     uint16_t *smq)
{
	struct nix_tm_node *node;
	int rc;

	node = nix_tm_node_search(nix, sq, nix->tm_tree);

	/* Check if we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
		return -EIO;
	}

	/* Get the SMQ ID of the leaf node's parent */
	*smq = node->parent->hw_id;
	*rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	rc = nix_tm_smq_xoff(nix, node->parent, false);
	if (rc)
		return rc;
	node->flags |= NIX_TM_NODE_ENABLED;
	return 0;
}

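/* Poll NIX_LF_SQ_OP_STATUS until the SQ is quiescent: at most one SQB in
 * use, head == tail, and all SQB buffers returned to the aura. The
 * timeout is a worst-case drain estimate in 10 us units,
 *   nb_desc * max_pkt_len * 8 bits * 1E5 / tm_rate_min (bps),
 * scaled by the number of TX queues that may have to drain first.
 */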
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint64_t wdata, val, prev;
	uint16_t qid = sq->qid;
	int64_t *regaddr;
	uint64_t timeout; /* in units of 10 us */

	/* Wait for enough time based on shaper min rate */
	timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
	/* Wait for the worst case of this SQ being the last priority
	 * and hence having to wait for all other SQs to drain on their own.
	 */
	timeout = timeout * nix->nb_tx_queues;
	timeout = timeout / nix->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	wdata = ((uint64_t)qid << 32);
	regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(wdata, regaddr);

	/* Spin for multiple iterations as "sq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled.
	 */

	while (true) {
		prev = val;
		plt_delay_us(10);
		val = roc_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		if (prev != val)
			continue;

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
			break;
		}

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	roc_nix_queues_ctx_dump(sq->roc_nix);
	return -EFAULT;
}

/* Flush an SQ's pending packets and disable it along with its parent SMQ */
int
nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct mbox *mbox;
	struct nix *nix;
	uint16_t qid;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Nothing to do if the tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	mbox = (&nix->dev)->mbox;
	qid = sq->qid;

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
		plt_err("Invalid node/state for sq %u", qid);
		return -EFAULT;
	}

	/* Enable CGX RXTX to drain pkts */
	if (!roc_nix->io_enabled) {
		/* Though this enables both RX MCAM entries and the CGX link,
		 * we assume all RX queues were stopped well before this point.
		 */
		mbox_alloc_msg_nix_lf_start_rx(mbox);
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}

	/* Disable SMQ XOFF in case it was enabled earlier */
	rc = nix_tm_smq_xoff(nix, node->parent, false);
	if (rc) {
		plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
			rc);
		return rc;
	}

	/* As per the HRM, to disable an SQ, all other SQs
	 * that feed the same SMQ must be paused before the SMQ flush.
	 */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;
		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		qid = sibling->id;
		sq = nix->sqs[qid];
		if (!sq)
			continue;

		rc = roc_nix_tm_sq_aura_fc(sq, false);
		if (rc) {
			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
			goto cleanup;
		}

		/* Wait for sq entries to be flushed */
		rc = roc_nix_tm_sq_flush_spin(sq);
		if (rc) {
			plt_err("Failed to drain sq %u, rc=%d", sq->qid, rc);
			return rc;
		}
	}

	node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Disable and flush */
	rc = nix_tm_smq_xoff(nix, node->parent, true);
	if (rc) {
		plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
			rc);
		goto cleanup;
	}
cleanup:
	/* Restore cgx state */
	if (!roc_nix->io_enabled) {
		mbox_alloc_msg_nix_lf_stop_rx(mbox);
		rc |= mbox_process(mbox);
	}

	return rc;
}

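/* Counterpart of nix_tm_sq_flush_pre(): once the SQ context is cleaned
 * up, re-open the shared parent SMQ if any sibling SQ is still in use
 * and restore the siblings' SQB aura flow control.
 */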
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct roc_nix_sq *s_sq;
	bool once = false;
	uint16_t qid, s_qid;
	struct nix *nix;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Nothing to do if the tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	qid = sq->qid;
	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node) {
		plt_err("Invalid node for sq %u", qid);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;

		if (sibling->id == qid)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_qid = sibling->id;
		s_sq = nix->sqs[s_qid];
		if (!s_sq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_tm_smq_xoff(nix, node->parent, false);
			if (rc) {
				plt_err("Failed to enable smq %u, rc=%d",
					node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = roc_nix_tm_sq_aura_fc(s_sq, true);
		if (rc) {
			plt_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	return 0;
}

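/* Bind an SQ to its parent SMQ and program the RR quantum/weight via the
 * NIX AQ. CN9K and CN10K differ only in the SQ context layout: the field
 * is smq_rr_quantum on CN9K and smq_rr_weight on CN10K.
 */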
int
nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
		     bool rr_quantum_only)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid = node->id, smq;
	uint64_t rr_quantum;
	int rc;

	smq = node->parent->hw_id;
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	if (rr_quantum_only)
		plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
			   rr_quantum);
	else
		plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
			   qid, smq, rr_quantum);

	if (qid > nix->nb_tx_queues)
		return -EFAULT;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* Update SMQ only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		aq->sq.smq_rr_quantum = rr_quantum;
		aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* Update SMQ only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		aq->sq.smq_rr_weight = rr_quantum;
		aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
	}

	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to set smq, rc=%d", rc);
	return rc;
}

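/* Return scheduler queues of one HW level to the AF, scanning the
 * contiguous or discontiguous bitmap: either everything, or only the
 * count above the reserved threshold for that level.
 */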
int
nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
			 bool above_thresh)
{
	uint16_t avail, thresh, to_free = 0, schq;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int rc = -ENOSPC;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	thresh =
		contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
	plt_bitmap_scan_init(bmp);

	avail = nix_tm_resource_avail(nix, hw_lvl, contig);

	if (above_thresh) {
		/* Release only above threshold */
		if (avail > thresh)
			to_free = avail - thresh;
	} else {
		/* Release everything */
		to_free = avail;
	}

	/* Now release resources to AF */
	while (to_free) {
		if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
			break;

		schq = bitmap_ctzll(slab);
		slab &= ~(1ULL << schq);
		schq += pos;

		/* Free to AF */
		req = mbox_alloc_msg_nix_txsch_free(mbox);
		if (req == NULL)
			return rc;
		req->flags = 0;
		req->schq_lvl = hw_lvl;
		req->schq = schq;
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("failed to release hwres %s(%u) rc %d",
				nix_tm_hwlvl2str(hw_lvl), schq, rc);
			return rc;
		}

		plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
			   schq);
		plt_bitmap_clear(bmp, schq);
		to_free--;
	}

	if (to_free) {
		plt_err("resource inconsistency for %s(%u)",
			nix_tm_hwlvl2str(hw_lvl), contig);
		return -EFAULT;
	}
	return 0;
}

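/* Free a node's HW queue. When the discontiguous reserve for this level
 * is not yet met, the queue is kept locally (returned to the bitmap)
 * instead of being handed back to the AF.
 */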
int
nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint16_t avail, hw_id;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = node->hw_lvl;
	hw_id = node->hw_id;
	bmp = nix->schq_bmp[hw_lvl];
	/* Free specific HW resource */
	plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
		   node);

	avail = nix_tm_resource_avail(nix, hw_lvl, false);
	/* For now, always free to the discontiguous pool when its
	 * availability is below the reserved count.
	 */
	if (nix->discontig_rsvd[hw_lvl] &&
	    avail < nix->discontig_rsvd[hw_lvl]) {
		PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
		PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
		plt_bitmap_set(bmp, hw_id);
		node->hw_id = NIX_TM_HW_ID_INVALID;
		node->flags &= ~NIX_TM_NODE_HWRES;
		return 0;
	}

	/* Free to AF */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return rc;
	req->flags = 0;
	req->schq_lvl = node->hw_lvl;
	req->schq = hw_id;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("failed to release hwres %s(%u) rc %d",
			nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
		return rc;
	}

	/* Mark parent dirty for reallocating its children */
	if (node->parent)
		node->parent->child_realloc = true;

	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags &= ~NIX_TM_NODE_HWRES;
	plt_tm_dbg("Released hwres %s(%u) to af",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id);
	return 0;
}

int
nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
		   enum roc_nix_tm_tree tree, bool free)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_node *node, *child;
	struct nix_tm_node_list *list;
	uint32_t profile_id;
	int rc;

	plt_tm_dbg("Delete node id %u tree %u", node_id, tree);

	node = nix_tm_node_search(nix, node_id, tree);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	list = nix_tm_node_list(nix, tree);
	/* Check for any existing children */
	TAILQ_FOREACH(child, list, node) {
		if (child->parent == node)
			return NIX_ERR_TM_CHILD_EXISTS;
	}

	/* Remove shaper profile reference */
	profile_id = node->shaper_profile_id;
	profile = nix_tm_shaper_profile_search(nix, profile_id);

	/* Free HW resource locally */
	if (node->flags & NIX_TM_NODE_HWRES) {
		rc = nix_tm_free_node_resource(nix, node);
		if (rc)
			return rc;
	}

	if (profile)
		profile->ref_cnt--;

	TAILQ_REMOVE(list, node, node);

	plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
		   node->priority, node->weight,
		   node->parent ? node->parent->id : UINT32_MAX,
		   node->shaper_profile_id, tree, node);
	/* Free only if requested */
	if (free)
		nix_tm_node_free(node);
	return 0;
}

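/* Hand out HW queue ids to the children of one parent. Strict-priority
 * children take consecutive ids from the preallocated contiguous range,
 * indexed by priority; round-robin children (priority == rr_prio) first
 * consume the spare contiguous slot at the RR position and then fall
 * back to the discontiguous bitmap.
 */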
static int
nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
		    uint16_t *contig_id, int *contig_cnt,
		    struct nix_tm_node_list *list)
{
	struct nix_tm_node *child;
	struct plt_bitmap *bmp;
	uint8_t child_hw_lvl;
	int spare_schq = -1;
	uint32_t pos = 0;
	uint64_t slab;
	uint16_t schq;

	child_hw_lvl = parent->hw_lvl - 1;
	bmp = nix->schq_bmp[child_hw_lvl];
	plt_bitmap_scan_init(bmp);
	slab = 0;

	/* Save a spare schq for the RR + SP case */
	if (parent->rr_prio != 0xf && *contig_cnt > 1)
		spare_schq = *contig_id + parent->rr_prio;

	TAILQ_FOREACH(child, list, node) {
		if (!child->parent)
			continue;
		if (child->parent->id != parent->id)
			continue;

		/* A resource is never expected to be present here */
		if (child->flags & NIX_TM_NODE_HWRES) {
			plt_err("Resource exists for child (%s)%u, id %u (%p)",
				nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
				child->id, child);
			return -EFAULT;
		}

		if (!slab)
			plt_bitmap_scan(bmp, &pos, &slab);

		if (child->priority == parent->rr_prio && spare_schq != -1) {
			/* Use the spare schq first if present */
			schq = spare_schq;
			spare_schq = -1;
			*contig_cnt = *contig_cnt - 1;

		} else if (child->priority == parent->rr_prio) {
			/* Assign a discontiguous queue */
			if (!slab) {
				plt_err("Schq not found for child %u "
					"lvl %u (%p)",
					child->id, child->lvl, child);
				return -ENOENT;
			}

			schq = bitmap_ctzll(slab);
			slab &= ~(1ULL << schq);
			schq += pos;
			plt_bitmap_clear(bmp, schq);
		} else {
			/* Assign a contiguous queue */
			schq = *contig_id + child->priority;
			*contig_cnt = *contig_cnt - 1;
		}

		plt_tm_dbg("Resource %s(%u), for lvl %u id %u (%p)",
			   nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
			   child->id, child);

		child->hw_id = schq;
		child->parent_hw_id = parent->hw_id;
		child->flags |= NIX_TM_NODE_HWRES;
	}

	return 0;
}

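/* Walk parents from TL1 downwards and reassign HW ids to the children
 * of every dirty parent. Each such parent needs max_prio + 1 contiguous
 * ids for its priority slots; the root (and a TL1 id when the LF has no
 * TL1 access) comes from the discontiguous pool.
 */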
int
nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *parent, *root = NULL;
	struct plt_bitmap *bmp, *bmp_contig;
	struct nix_tm_node_list *list;
	uint8_t child_hw_lvl, hw_lvl;
	uint16_t contig_id, j;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int cnt, rc;

	list = nix_tm_node_list(nix, tree);
	/* Walk from TL1 to TL4 parents */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != hw_lvl)
				continue;

			/* Remember root for later */
			if (parent->hw_lvl == nix->tm_root_lvl)
				root = parent;

			if (!parent->child_realloc) {
				/* Skip when parent is not dirty */
				if (nix_tm_child_res_valid(list, parent))
					continue;
				plt_err("Parent not dirty but invalid "
					"child res, parent id %u (lvl %u)",
					parent->id, parent->lvl);
				return -EFAULT;
			}

			bmp_contig = nix->schq_contig_bmp[child_hw_lvl];

			/* Prealloc contiguous indices for a parent */
			contig_id = NIX_TM_MAX_HW_TXSCHQ;
			cnt = (int)parent->max_prio + 1;
			if (cnt > 0) {
				plt_bitmap_scan_init(bmp_contig);
				if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
					plt_err("Contig schq not found");
					return -ENOENT;
				}
				contig_id = pos + bitmap_ctzll(slab);

				/* Check if we have enough */
				for (j = contig_id; j < contig_id + cnt; j++) {
					if (!plt_bitmap_get(bmp_contig, j))
						break;
				}

				if (j != contig_id + cnt) {
					plt_err("Contig schq not sufficient");
					return -ENOENT;
				}

				for (j = contig_id; j < contig_id + cnt; j++)
					plt_bitmap_clear(bmp_contig, j);
			}

			/* Assign hw id to all children */
			rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
						 list);
			if (cnt || rc) {
				plt_err("Unexpected err, contig res alloc, "
					"parent %u, of %s, rc=%d, cnt=%d",
					parent->id, nix_tm_hwlvl2str(hw_lvl),
					rc, cnt);
				return -EFAULT;
			}

			/* Clear the dirty bit as children's
			 * resources are reallocated.
			 */
			parent->child_realloc = false;
		}
	}

	/* Root is always expected to be there */
	if (!root)
		return -EFAULT;

	if (root->flags & NIX_TM_NODE_HWRES)
		return 0;

	/* Process root node */
	bmp = nix->schq_bmp[nix->tm_root_lvl];
	plt_bitmap_scan_init(bmp);
	if (!plt_bitmap_scan(bmp, &pos, &slab)) {
		plt_err("Resource not allocated for root");
		return -EIO;
	}

	root->hw_id = pos + bitmap_ctzll(slab);
	root->flags |= NIX_TM_NODE_HWRES;
	plt_bitmap_clear(bmp, root->hw_id);

	/* Get a TL1 id as well when root is not TL1 */
	if (!nix_tm_have_tl1_access(nix)) {
		bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];

		plt_bitmap_scan_init(bmp);
		if (!plt_bitmap_scan(bmp, &pos, &slab)) {
			plt_err("Resource not found for TL1");
			return -EIO;
		}
		root->parent_hw_id = pos + bitmap_ctzll(slab);
		plt_bitmap_clear(bmp, root->parent_hw_id);
	}

	plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
		   nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);

	return 0;
}

/* Record AF-allocated scheduler queues in the local per-level bitmaps */
void
nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
{
	uint8_t lvl;
	uint16_t i;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (i = 0; i < rsp->schq[lvl]; i++)
			plt_bitmap_set(nix->schq_bmp[lvl],
				       rsp->schq_list[lvl][i]);

		for (i = 0; i < rsp->schq_contig[lvl]; i++)
			plt_bitmap_set(nix->schq_contig_bmp[lvl],
				       rsp->schq_contig_list[lvl][i]);
	}
}

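/* Estimate the scheduler queues a tree needs and allocate them from the
 * AF, splitting into multiple NIX_TXSCH_ALLOC requests when a level
 * exceeds MAX_TXSCHQ_PER_FUNC, and releasing everything on failure.
 */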
int
nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
{
	uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t schq[NIX_TXSCH_LVL_CNT];
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl, i;
	bool pend;
	int rc;

	memset(schq, 0, sizeof(schq));
	memset(schq_contig, 0, sizeof(schq_contig));

	/* Estimate requirement */
	rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
	if (!rc)
		return 0;

	/* Release existing contiguous resources when a realloc is requested,
	 * as there is no way to guarantee contiguity of the old with the new.
	 */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		if (schq_contig[hw_lvl])
			nix_tm_release_resources(nix, hw_lvl, true, false);
	}

	/* Alloc as needed */
	do {
		pend = false;
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (!req) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		mbox_memcpy(req->schq, schq, sizeof(req->schq));
		mbox_memcpy(req->schq_contig, schq_contig,
			    sizeof(req->schq_contig));

		/* Each alloc can be at most MAX_TXSCHQ_PER_FUNC per level,
		 * so split the alloc into multiple requests.
		 */
		for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
			if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq[i] = MAX_TXSCHQ_PER_FUNC;
			schq[i] -= req->schq[i];

			if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
			schq_contig[i] -= req->schq_contig[i];

			if (schq[i] || schq_contig[i])
				pend = true;
		}

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto alloc_err;

		nix_tm_copy_rsp_to_nix(nix, rsp);
	} while (pend);

	nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
	return 0;
alloc_err:
	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		if (nix_tm_release_resources(nix, i, true, false))
			plt_err("Failed to release contig resources of "
				"lvl %d on error",
				i);
		if (nix_tm_release_resources(nix, i, false, false))
			plt_err("Failed to release discontig resources of "
				"lvl %d on error",
				i);
	}
	return rc;
}

int
nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_node *node, *next_node;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	uint32_t profile_id;
	int rc = 0;

	for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
		if (!(tree_mask & BIT(tree)))
			continue;

		plt_tm_dbg("Freeing resources of tree %u", tree);

		list = nix_tm_node_list(nix, tree);
		next_node = TAILQ_FIRST(list);
		while (next_node) {
			node = next_node;
			next_node = TAILQ_NEXT(node, node);

			if (!nix_tm_is_leaf(nix, node->lvl) &&
			    node->flags & NIX_TM_NODE_HWRES) {
				/* Clear xoff in path for flush to succeed */
				rc = nix_tm_clear_path_xoff(nix, node);
				if (rc)
					return rc;
				rc = nix_tm_free_node_resource(nix, node);
				if (rc)
					return rc;
			}
		}

		/* Leave software elements if needed */
		if (hw_only)
			continue;

		next_node = TAILQ_FIRST(list);
		while (next_node) {
			node = next_node;
			next_node = TAILQ_NEXT(node, node);

			plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
				   node->id, node);

			profile_id = node->shaper_profile_id;
			profile = nix_tm_shaper_profile_search(nix, profile_id);
			if (profile)
				profile->ref_cnt--;

			TAILQ_REMOVE(list, node, node);
			nix_tm_node_free(node);
		}
	}
	return rc;
}

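/* One-time TM state init: per-tree node lists, the shaper profile list,
 * and two bitmaps (discontiguous + contiguous) per HW level, all carved
 * out of a single allocation.
 */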
int
nix_tm_conf_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t bmp_sz, hw_lvl;
	void *bmp_mem;
	int rc, i;

	PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
	PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
			  ROC_NIX_TM_SHAPER_PROFILE_SZ);

	nix->tm_flags = 0;
	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
		TAILQ_INIT(&nix->trees[i]);

	TAILQ_INIT(&nix->shaper_profile_list);
	nix->tm_rate_min = 1E9; /* 1Gbps */

	rc = -ENOMEM;
	bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
	bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
	if (!bmp_mem)
		return rc;
	nix->schq_bmp_mem = bmp_mem;

	/* Init contiguous and discontiguous bitmap per lvl */
	rc = -EIO;
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		/* Bitmap for discontiguous resource */
		nix->schq_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);

		/* Bitmap for contiguous resource */
		nix->schq_contig_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_contig_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
	}

	/* Disable TL1 static priority when VFs are enabled, as otherwise
	 * the VFs' TL2 would need runtime reallocation to support a
	 * specific PF topology.
	 */
	if (nix->pci_dev->max_vfs)
		nix->tm_flags |= NIX_TM_TL1_NO_SP;

	/* TL1 access is only for PFs */
	if (roc_nix_is_pf(roc_nix)) {
		nix->tm_flags |= NIX_TM_TL1_ACCESS;
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
	} else {
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
	}

	return 0;
exit:
	nix_tm_conf_fini(roc_nix);
	return rc;
}

void
nix_tm_conf_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t hw_lvl;

	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		plt_bitmap_free(nix->schq_bmp[hw_lvl]);
		plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
	}
	plt_free(nix->schq_bmp_mem);
}