/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"
static inline int
nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *p;
	int rc;

	/* Enable nodes in path for flush to succeed */
	if (!nix_tm_is_leaf(nix, node->lvl))
		p = node;
	else
		p = node->parent;

	while (p) {
		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
		    (p->flags & NIX_TM_NODE_HWRES)) {
			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
			req->lvl = p->hw_lvl;
			req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
							    req->regval);
			rc = mbox_process(mbox);
			if (rc)
				return rc;

			p->flags |= NIX_TM_NODE_ENABLED;
		}
		p = p->parent;
	}

	return 0;
}
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	uint16_t smq;
	int rc;

	smq = node->hw_id;
	plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
		   enable ? "enable" : "disable");

	rc = nix_tm_clear_path_xoff(nix, node);
	if (rc)
		return rc;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	/* Set or clear the xoff/flush bits (bits 49/50) of SMQ config */
	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
	req->regval_mask[0] =
		enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

	return mbox_process(mbox);
}
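
/*
 * Illustrative sketch (not driver code): each txschq config entry above
 * carries a value/mask pair, and the AF is assumed to apply it as a
 * read-modify-write, preserving the register bits selected by regval_mask
 * and overlaying regval, roughly as below. Only the regval/regval_mask
 * encoding is taken from this file; the exact AF behavior is an assumption.
 */
static inline uint64_t
nix_tm_example_regval_rmw(uint64_t old_reg, uint64_t regval,
			  uint64_t regval_mask)
{
	/* Keep masked-in bits of the old value, then set the new bits */
	return (old_reg & regval_mask) | regval;
}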
int
nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
		     uint16_t *smq)
{
	struct nix_tm_node *node;
	int rc;

	node = nix_tm_node_search(nix, sq, nix->tm_tree);

	/* Check if we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
		return -EIO;
	}

	/* Get SMQ Id of leaf node's parent */
	*smq = node->parent->hw_id;
	*rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	rc = nix_tm_smq_xoff(nix, node->parent, false);
	if (rc)
		return rc;
	node->flags |= NIX_TM_NODE_ENABLED;

	return 0;
}
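
/*
 * Hypothetical usage sketch: an SQ setup path would fetch the parent SMQ
 * and RR quantum for a send queue before programming the SQ context. The
 * function name below and the caller shape are assumptions for
 * illustration; only nix_tm_leaf_data_get() comes from this file.
 */
static inline int
nix_tm_example_sq_cfg(struct nix *nix, uint16_t sq_id)
{
	uint32_t rr_quantum = 0;
	uint16_t smq = 0;
	int rc;

	rc = nix_tm_leaf_data_get(nix, sq_id, &rr_quantum, &smq);
	if (rc)
		return rc;

	/* A real caller would write smq/rr_quantum into the SQ context */
	plt_tm_dbg("sq %u -> smq %u, rr_quantum %u", sq_id, smq, rr_quantum);
	return 0;
}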
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint64_t wdata, val, prev;
	uint16_t qid = sq->qid;
	int64_t *regaddr;
	uint64_t timeout; /* 10's of usec */

	/* Wait for enough time based on shaper min rate */
	timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
	/* Wait for the worst-case scenario of this SQ being the last
	 * priority, in which case it has to wait for all other SQs to
	 * drain out on their own.
	 */
	timeout = timeout * nix->nb_tx_queues;
	timeout = timeout / nix->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	wdata = ((uint64_t)qid << 32);
	regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(wdata, regaddr);

	/* Spin multiple iterations as "sq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled
	 */
	while (true) {
		prev = val;
		plt_delay_us(10);
		val = roc_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		if (prev != val)
			continue;

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
			break;
		}

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	roc_nix_queues_ctx_dump(sq->roc_nix);
	return -EFAULT;
}
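
/*
 * Worked example of the timeout math in roc_nix_tm_sq_flush_spin() (the
 * numbers are illustrative assumptions, not taken from this file): with
 * nb_desc = 1024, max_pkt_len = 9212 B, nb_tx_queues = 8 and
 * tm_rate_min = 1E9 bps:
 *   timeout = 1024 * 9212 * 8 bits * 1E5 = ~7.5E12
 *   timeout = timeout * 8 / 1E9          = ~6.0E4 ten-usec units (~0.6 s)
 * i.e. the spin is bounded by the worst-case time for every SQ to drain
 * at the minimum shaper rate.
 */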
/* Flush and disable tx queue and its parent SMQ */
int
nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct mbox *mbox;
	struct nix *nix;
	uint16_t qid;
	int rc = 0;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Need not do anything if tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	mbox = (&nix->dev)->mbox;
	qid = sq->qid;

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
		plt_err("Invalid node/state for sq %u", qid);
		return -EFAULT;
	}

	/* Enable CGX RXTX to drain pkts */
	if (!roc_nix->io_enabled) {
		/* Though this enables both RX MCAM entries and the CGX link,
		 * we assume all the rx queues were stopped well before this.
		 */
		mbox_alloc_msg_nix_lf_start_rx(mbox);
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}

	/* Disable smq xoff for case it was enabled earlier */
	rc = nix_tm_smq_xoff(nix, node->parent, false);
	if (rc) {
		plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
			rc);
		return rc;
	}

	/* As per HRM, to disable an SQ, all other SQs that feed the
	 * same SMQ must be paused before the SMQ flush.
	 */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;
		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		qid = sibling->id;
		sq = nix->sqs[qid];
		if (!sq)
			continue;

		rc = roc_nix_tm_sq_aura_fc(sq, false);
		if (rc) {
			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
			goto cleanup;
		}

		/* Wait for sq entries to be flushed */
		rc = roc_nix_tm_sq_flush_spin(sq);
		if (rc) {
			plt_err("Failed to drain sq %u, rc=%d", sq->qid, rc);
			return rc;
		}
	}

	node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Disable and flush */
	rc = nix_tm_smq_xoff(nix, node->parent, true);
	if (rc) {
		plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
			rc);
		goto cleanup;
	}
cleanup:
	/* Restore cgx state */
	if (!roc_nix->io_enabled) {
		mbox_alloc_msg_nix_lf_stop_rx(mbox);
		rc |= mbox_process(mbox);
	}

	return rc;
}
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct roc_nix_sq *s_sq;
	bool once = false;
	uint16_t qid, s_qid;
	struct nix *nix;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Need not do anything if tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	qid = sq->qid;
	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node) {
		plt_err("Invalid node for sq %u", qid);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;

		if (sibling->id == qid)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_qid = sibling->id;
		s_sq = nix->sqs[s_qid];
		if (!s_sq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_tm_smq_xoff(nix, node->parent, false);
			if (rc) {
				plt_err("Failed to enable smq %u, rc=%d",
					node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = roc_nix_tm_sq_aura_fc(s_sq, true);
		if (rc) {
			plt_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	return 0;
}
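
/*
 * Hypothetical caller sketch: an SQ stop path is expected to bracket the
 * actual queue teardown with the pre/post flush helpers above. The function
 * name below and the mbox teardown step are assumptions for illustration;
 * only nix_tm_sq_flush_pre()/nix_tm_sq_flush_post() come from this file.
 */
static inline int
roc_nix_example_sq_stop(struct roc_nix_sq *sq)
{
	int rc;

	/* Pause siblings, drain this SQ and flush its parent SMQ */
	rc = nix_tm_sq_flush_pre(sq);
	if (rc)
		return rc;

	/* ... disable the SQ context via mbox here ... */

	/* Re-enable the siblings feeding the same SMQ */
	return nix_tm_sq_flush_post(sq);
}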
int
nix_tm_conf_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t bmp_sz, hw_lvl;
	void *bmp_mem;
	int rc, i;

	nix->tm_flags = 0;
	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
		TAILQ_INIT(&nix->trees[i]);

	TAILQ_INIT(&nix->shaper_profile_list);
	nix->tm_rate_min = 1E9; /* 1Gbps */

	rc = -ENOMEM;
	bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
	bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
	if (!bmp_mem)
		return rc;
	nix->schq_bmp_mem = bmp_mem;

	/* Init contiguous and discontiguous bitmap per lvl */
	rc = -EIO;
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		/* Bitmap for discontiguous resource */
		nix->schq_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);

		/* Bitmap for contiguous resource */
		nix->schq_contig_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_contig_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
	}

	/* Disable TL1 static priority when VFs are enabled, as otherwise
	 * the VFs' TL2 would need runtime reallocation to support a
	 * specific PF topology.
	 */
	if (nix->pci_dev->max_vfs)
		nix->tm_flags |= NIX_TM_TL1_NO_SP;

	/* TL1 access is only for PFs */
	if (roc_nix_is_pf(roc_nix)) {
		nix->tm_flags |= NIX_TM_TL1_ACCESS;
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
	} else {
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
	}

	return 0;
exit:
	nix_tm_conf_fini(roc_nix);
	return rc;
}
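
/*
 * Layout note (derived from the allocation above): a single zmalloc'd
 * region holds 2 * NIX_TXSCH_LVL_CNT bitmaps of bmp_sz bytes each,
 * carved out as:
 *   [lvl0 discontig][lvl0 contig][lvl1 discontig][lvl1 contig]...
 * so freeing only nix->schq_bmp_mem in nix_tm_conf_fini() releases all
 * of them at once.
 */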
void
nix_tm_conf_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t hw_lvl;

	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		plt_bitmap_free(nix->schq_bmp[hw_lvl]);
		plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
	}

	plt_free(nix->schq_bmp_mem);
}
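
/*
 * Hypothetical lifecycle sketch (illustration only; the caller name and
 * ordering are assumptions): nix_tm_conf_init() is expected to run once at
 * device setup and nix_tm_conf_fini() at teardown, with hierarchy setup
 * and traffic in between.
 */
static inline int
nix_tm_example_lifecycle(struct roc_nix *roc_nix)
{
	int rc;

	rc = nix_tm_conf_init(roc_nix);
	if (rc)
		return rc;

	/* ... build the TM hierarchy and run traffic here ... */

	nix_tm_conf_fini(roc_nix);
	return 0;
}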