/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"
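/* Validate a new TM node against its tree, level, parent and shaper
 * profile, and link it into the SW topology. No HW resource is
 * allocated here; hw_id is left as NIX_TM_HW_ID_INVALID.
 */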
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile *profile;
        uint32_t node_id, parent_id, lvl;
        struct nix_tm_node *parent_node;
        uint32_t priority, profile_id;
        uint8_t hw_lvl, exp_next_lvl;
        enum roc_nix_tm_tree tree;
        int rc;

        node_id = node->id;
        priority = node->priority;
        parent_id = node->parent_id;
        profile_id = node->shaper_profile_id;
        lvl = node->lvl;
        tree = node->tree;

        plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
                   "parent %u profile 0x%x tree %u",
                   nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
                   priority, node->weight, parent_id, profile_id, tree);
        if (tree >= ROC_NIX_TM_TREE_MAX)
                return NIX_ERR_PARAM;

        /* Translate SW level IDs to NIX HW level IDs */
        hw_lvl = nix_tm_lvl2nix(nix, lvl);
        if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
                return NIX_ERR_TM_INVALID_LVL;

        /* Leaf nodes have to be the same priority */
        if (nix_tm_is_leaf(nix, lvl) && priority != 0)
                return NIX_ERR_TM_INVALID_PRIO;

        parent_node = nix_tm_node_search(nix, parent_id, tree);

        if (node_id < nix->nb_tx_queues)
                exp_next_lvl = NIX_TXSCH_LVL_SMQ;
        else
                exp_next_lvl = hw_lvl + 1;

        /* Check if there is no parent node yet */
        if (hw_lvl != nix->tm_root_lvl &&
            (!parent_node || parent_node->hw_lvl != exp_next_lvl))
                return NIX_ERR_TM_INVALID_PARENT;

        /* Check if a node already exists */
        if (nix_tm_node_search(nix, node_id, tree))
                return NIX_ERR_TM_NODE_EXISTS;
        profile = nix_tm_shaper_profile_search(nix, profile_id);
        if (!nix_tm_is_leaf(nix, lvl)) {
                /* Check if a shaper profile exists for a non-leaf node */
                if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
                        return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

                /* Packet mode in the profile should match that of the TM node */
                if (profile && profile->pkt_mode != node->pkt_mode)
                        return NIX_ERR_TM_PKT_MODE_MISMATCH;
        }

        /* Check for a second DWRR node among siblings or holes in priorities */
        rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
        if (rc)
                return rc;

        if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
                return NIX_ERR_TM_WEIGHT_EXCEED;

        /* Maintain minimum weight */
        if (!node->weight)
                node->weight = 1;
        node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
        node->max_prio = UINT32_MAX;
        node->hw_id = NIX_TM_HW_ID_INVALID;

        node->parent = parent_node;
        if (parent_node)
                parent_node->child_realloc = true;
        node->parent_hw_id = NIX_TM_HW_ID_INVALID;

        TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
        plt_tm_dbg("Added node %s lvl %u id %u (%p)",
                   nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
        return 0;
}
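/* Walk from the given node towards the root and clear SW XOFF on any
 * node that holds HW resources but is not yet enabled, so that a
 * subsequent flush can make progress.
 */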
int
nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req;
        struct nix_tm_node *p;
        int rc;

        /* Enable nodes in path for flush to succeed */
        if (!nix_tm_is_leaf(nix, node->lvl))
                p = node;
        else
                p = node->parent;

        while (p) {
                if (!(p->flags & NIX_TM_NODE_ENABLED) &&
                    (p->flags & NIX_TM_NODE_HWRES)) {
                        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
                        req->lvl = p->hw_lvl;
                        req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
                                                            req->regval);
                        rc = mbox_process(mbox);
                        if (rc)
                                return rc;

                        p->flags |= NIX_TM_NODE_ENABLED;
                }
                p = p->parent;
        }

        return 0;
}
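/* Set or clear the XOFF/flush controls of an SMQ through a TXSCHQ config
 * mailbox message. The path above the SMQ is un-XOFFed first so that the
 * flush can drain.
 */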
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txschq_config *req;
        uint16_t smq;
        int rc;

        smq = node->hw_id;
        plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
                   enable ? "enable" : "disable");

        rc = nix_tm_clear_path_xoff(nix, node);
        if (rc)
                return rc;

        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = NIX_TXSCH_LVL_SMQ;
        req->num_regs = 1;
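        /* The two NIX_AF_SMQX_CFG bits driven below (49 and 50) are assumed
         * to be the SMQ flush and enqueue-XOFF controls; only bit 50 is
         * targeted by the write mask on disable.
         */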
        req->reg[0] = NIX_AF_SMQX_CFG(smq);
        req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
        req->regval_mask[0] =
                enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

        return mbox_process(mbox);
}
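/* Fetch the SMQ and RR quantum that a leaf (SQ) node maps to, re-enable
 * its parent SMQ and mark the node enabled.
 */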
int
nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
                     uint16_t *smq)
{
        struct nix_tm_node *node;
        int rc;

        node = nix_tm_node_search(nix, sq, nix->tm_tree);

        /* Check if we found a valid leaf node */
        if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
            node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
                return -EIO;
        }

        /* Get the SMQ ID of the leaf node's parent */
        *smq = node->parent->hw_id;
        *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

        rc = nix_tm_smq_xoff(nix, node->parent, false);
        if (rc)
                return rc;
        node->flags |= NIX_TM_NODE_ENABLED;

        return 0;
}
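/* Poll NIX_LF_SQ_OP_STATUS until the SQ is quiescent (at most one SQB in
 * use and head == tail), giving up after a timeout derived from the
 * shaper minimum rate.
 */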
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
        struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
        uint16_t sqb_cnt, head_off, tail_off;
        uint64_t wdata, val, prev;
        uint16_t qid = sq->qid;
        int64_t *regaddr;
        uint64_t timeout; /* 10's of usec */

        /* Wait for enough time based on shaper min rate */
        timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
        /* Wait for the worst case scenario of this SQ being last in priority
         * and hence having to wait for all other SQs to drain out on their
         * own.
         */
        timeout = timeout * nix->nb_tx_queues;
        timeout = timeout / nix->tm_rate_min;
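        /* For example (hypothetical numbers): 1024 descriptors, a 9200 byte
         * max frame, 16 TX queues and a 1 Gbps minimum shaper rate give
         * roughly 1024 * 9200 * 8 * 1E5 * 16 / 1E9 ~= 1.2E5 units of 10 us,
         * i.e. about 1.2 seconds.
         */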
        wdata = ((uint64_t)qid << 32);
        regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
        val = roc_atomic64_add_nosync(wdata, regaddr);

        /* Spin multiple iterations as "sq->fc_cache_pkts" can still
         * have space to send pkts even though fc_mem is disabled.
         */
        while (true) {
                prev = val;
                plt_delay_us(10);
                val = roc_atomic64_add_nosync(wdata, regaddr);
                /* Continue on error */
                if (val & BIT_ULL(63))
                        continue;

                if (val != prev)
                        continue;

                sqb_cnt = val & 0xFFFF;
                head_off = (val >> 20) & 0x3F;
                tail_off = (val >> 28) & 0x3F;

                /* SQ reached quiescent state */
                if (sqb_cnt <= 1 && head_off == tail_off &&
                    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
                        break;
                }

                /* Timeout */
                if (!timeout)
                        goto exit;
                timeout--;
        }

        return 0;
exit:
        roc_nix_queues_ctx_dump(sq->roc_nix);
        return -EFAULT;
}
/* Flush and disable a TX queue and its parent SMQ */
int
nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
{
        struct roc_nix *roc_nix = sq->roc_nix;
        struct nix_tm_node *node, *sibling;
        struct nix_tm_node_list *list;
        enum roc_nix_tm_tree tree;
        struct mbox *mbox;
        struct nix *nix;
        uint16_t qid;
        int rc;

        nix = roc_nix_to_nix_priv(roc_nix);

        /* Nothing to do if the tree is in disabled state */
        if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return 0;

        mbox = (&nix->dev)->mbox;
        qid = sq->qid;

        tree = nix->tm_tree;
        list = nix_tm_node_list(nix, tree);

        /* Find the node for this SQ */
        node = nix_tm_node_search(nix, qid, tree);
        if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
                plt_err("Invalid node/state for sq %u", qid);
                return -EFAULT;
        }
        /* Enable CGX RXTX to drain pkts */
        if (!roc_nix->io_enabled) {
                /* Though this enables both RX MCAM entries and the CGX link,
                 * we assume all the RX queues were stopped well before this.
                 */
                mbox_alloc_msg_nix_lf_start_rx(mbox);
                rc = mbox_process(mbox);
                if (rc) {
                        plt_err("cgx start failed, rc=%d", rc);
                        return rc;
                }
        }

        /* Disable SMQ xoff in case it was enabled earlier */
        rc = nix_tm_smq_xoff(nix, node->parent, false);
        if (rc) {
                plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
                        rc);
                return rc;
        }
        /* As per the HRM, to disable an SQ, all other SQs that feed the same
         * SMQ must be paused before the SMQ flush.
         */
        TAILQ_FOREACH(sibling, list, node) {
                if (sibling->parent != node->parent)
                        continue;
                if (!(sibling->flags & NIX_TM_NODE_ENABLED))
                        continue;

                qid = sibling->id;
                sq = nix->sqs[qid];
                if (!sq)
                        continue;

                rc = roc_nix_tm_sq_aura_fc(sq, false);
                if (rc) {
                        plt_err("Failed to disable sqb aura fc, rc=%d", rc);
                        goto cleanup;
                }

                /* Wait for sq entries to be flushed */
                rc = roc_nix_tm_sq_flush_spin(sq);
                if (rc) {
                        plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
                        return rc;
                }
        }
        node->flags &= ~NIX_TM_NODE_ENABLED;

        /* Disable and flush */
        rc = nix_tm_smq_xoff(nix, node->parent, true);
        if (rc) {
                plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
                        rc);
                goto cleanup;
        }
cleanup:
        /* Restore CGX state */
        if (!roc_nix->io_enabled) {
                mbox_alloc_msg_nix_lf_stop_rx(mbox);
                rc |= mbox_process(mbox);
        }

        return rc;
}
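/* Re-enable the SMQ and the SQB aura flow control for the sibling SQs
 * that were paused by nix_tm_sq_flush_pre().
 */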
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
        struct roc_nix *roc_nix = sq->roc_nix;
        struct nix_tm_node *node, *sibling;
        struct nix_tm_node_list *list;
        enum roc_nix_tm_tree tree;
        struct roc_nix_sq *s_sq;
        bool once = false;
        uint16_t qid, s_qid;
        struct nix *nix;
        int rc;

        nix = roc_nix_to_nix_priv(roc_nix);

        /* Nothing to do if the tree is in disabled state */
        if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
                return 0;

        qid = sq->qid;
        tree = nix->tm_tree;
        list = nix_tm_node_list(nix, tree);

        /* Find the node for this SQ */
        node = nix_tm_node_search(nix, qid, tree);
        if (!node) {
                plt_err("Invalid node for sq %u", qid);
                return -EFAULT;
        }
        /* Enable all the siblings back */
        TAILQ_FOREACH(sibling, list, node) {
                if (sibling->parent != node->parent)
                        continue;

                if (sibling->id == qid)
                        continue;

                if (!(sibling->flags & NIX_TM_NODE_ENABLED))
                        continue;

                s_qid = sibling->id;
                s_sq = nix->sqs[s_qid];
                if (!s_sq)
                        continue;

                if (!once) {
                        /* Enable back if any SQ is still present */
                        rc = nix_tm_smq_xoff(nix, node->parent, false);
                        if (rc) {
                                plt_err("Failed to enable smq %u, rc=%d",
                                        node->parent->hw_id, rc);
                                return rc;
                        }
                        once = true;
                }

                rc = roc_nix_tm_sq_aura_fc(s_sq, true);
                if (rc) {
                        plt_err("Failed to enable sqb aura fc, rc=%d", rc);
                        return rc;
                }
        }

        return 0;
}
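/* Return a node's TX scheduler queue either to the per-level discontiguous
 * bitmap or back to the AF through a TXSCH free mailbox request.
 */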
int
nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_txsch_free_req *req;
        struct plt_bitmap *bmp;
        uint16_t avail, hw_id;
        uint8_t hw_lvl;
        int rc;

        hw_lvl = node->hw_lvl;
        hw_id = node->hw_id;
        bmp = nix->schq_bmp[hw_lvl];
        /* Free specific HW resource */
        plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
                   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
                   node);

        avail = nix_tm_resource_avail(nix, hw_lvl, false);
        /* For now, always free to the discontiguous bitmap when the
         * available count is below the reserved amount.
         */
        if (nix->discontig_rsvd[hw_lvl] &&
            avail < nix->discontig_rsvd[hw_lvl]) {
                PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
                PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
                plt_bitmap_set(bmp, hw_id);
                node->hw_id = NIX_TM_HW_ID_INVALID;
                node->flags &= ~NIX_TM_NODE_HWRES;
                return 0;
        }

        /* Free the resource back to AF */
        req = mbox_alloc_msg_nix_txsch_free(mbox);
        req->schq_lvl = node->hw_lvl;
        req->schq = hw_id;
        rc = mbox_process(mbox);
        if (rc) {
                plt_err("failed to release hwres %s(%u) rc %d",
                        nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
                return rc;
        }

        /* Mark parent as dirty for reallocating its children */
        if (node->parent)
                node->parent->child_realloc = true;

        node->hw_id = NIX_TM_HW_ID_INVALID;
        node->flags &= ~NIX_TM_NODE_HWRES;
        plt_tm_dbg("Released hwres %s(%u) to af",
                   nix_tm_hwlvl2str(node->hw_lvl), hw_id);

        return 0;
}
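/* Unlink a node from the SW topology after checking for children, dropping
 * its shaper profile reference and freeing any HW resource it still holds.
 * The node memory itself is freed only when requested.
 */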
int
nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
                   enum roc_nix_tm_tree tree, bool free)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct nix_tm_shaper_profile *profile;
        struct nix_tm_node *node, *child;
        struct nix_tm_node_list *list;
        uint32_t profile_id;
        int rc;

        plt_tm_dbg("Delete node id %u tree %u", node_id, tree);

        node = nix_tm_node_search(nix, node_id, tree);
        if (!node)
                return NIX_ERR_TM_INVALID_NODE;

        list = nix_tm_node_list(nix, tree);
        /* Check for any existing children */
        TAILQ_FOREACH(child, list, node) {
                if (child->parent == node)
                        return NIX_ERR_TM_CHILD_EXISTS;
        }

        /* Remove shaper profile reference */
        profile_id = node->shaper_profile_id;
        profile = nix_tm_shaper_profile_search(nix, profile_id);

        /* Free HW resource locally */
        if (node->flags & NIX_TM_NODE_HWRES) {
                rc = nix_tm_free_node_resource(nix, node);
                if (rc)
                        return rc;
        }

        if (profile)
                profile->ref_cnt--;

        TAILQ_REMOVE(list, node, node);

        plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
                   "parent %u profile 0x%x tree %u (%p)",
                   nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
                   node->priority, node->weight,
                   node->parent ? node->parent->id : UINT32_MAX,
                   node->shaper_profile_id, tree, node);
        /* Free only if requested */
        if (free)
                nix_tm_node_free(node);

        return 0;
}
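/* One-time TM state init: SW trees, shaper profile list, per-level
 * contiguous/discontiguous TXSCHQ bitmaps and the TL1/TL2 root level
 * based on PF/VF personality.
 */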
int
nix_tm_conf_init(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint32_t bmp_sz, hw_lvl;
        void *bmp_mem;
        int rc, i;

        PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
        PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
                          ROC_NIX_TM_SHAPER_PROFILE_SZ);

        for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
                TAILQ_INIT(&nix->trees[i]);

        TAILQ_INIT(&nix->shaper_profile_list);
        nix->tm_rate_min = 1E9; /* 1Gbps */

        rc = -ENOMEM;
        bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
        bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
        if (!bmp_mem)
                goto exit;
        nix->schq_bmp_mem = bmp_mem;

        /* Init contiguous and discontiguous bitmaps per level */
        for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
                /* Bitmap for discontiguous resources */
                nix->schq_bmp[hw_lvl] =
                        plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
                if (!nix->schq_bmp[hw_lvl])
                        goto exit;

                bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);

                /* Bitmap for contiguous resources */
                nix->schq_contig_bmp[hw_lvl] =
                        plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
                if (!nix->schq_contig_bmp[hw_lvl])
                        goto exit;

                bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
        }

        /* Disable TL1 static priority when VFs are enabled, as otherwise the
         * VFs' TL2 would need to be reallocated at runtime to support a
         * specific PF topology.
         */
        if (nix->pci_dev->max_vfs)
                nix->tm_flags |= NIX_TM_TL1_NO_SP;

        /* TL1 access is only for PFs */
        if (roc_nix_is_pf(roc_nix)) {
                nix->tm_flags |= NIX_TM_TL1_ACCESS;
                nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
        } else {
                nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
        }

        return 0;
exit:
        nix_tm_conf_fini(roc_nix);
        return rc;
}
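/* Free the per-level TXSCHQ bitmaps and their backing memory. */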
void
nix_tm_conf_fini(struct roc_nix *roc_nix)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint16_t hw_lvl;

        for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
                plt_bitmap_free(nix->schq_bmp[hw_lvl]);
                plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
        }

        plt_free(nix->schq_bmp_mem);
}