1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/*
 * Enable or disable SQB-aura backed flow control for a send queue.
 * Writes the aura's fc_ena bit through the NPA admin queue, then reads
 * the aura context back so sq->fc starts with a sane value.
 *
 * NOTE(review): this excerpt elides lines (return type, braces, the
 * lf/mbox declarations and rc/NULL checks) — comments below describe
 * only the statements that are visible here.
 */
9 roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
11 struct npa_aq_enq_req *req;
12 struct npa_aq_enq_rsp *rsp;
18 plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
19 enable ? "enable" : "disable");
/* NPA LF must already be attached to this process; bail out otherwise */
21 lf = idev_npa_obj_get();
23 return NPA_ERR_DEVICE_NOT_BOUNDED;
26 /* Set/clear sqb aura fc_ena */
27 aura_handle = sq->aura_handle;
28 req = mbox_alloc_msg_npa_aq_enq(mbox);
32 req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
33 req->ctype = NPA_AQ_CTYPE_AURA;
34 req->op = NPA_AQ_INSTOP_WRITE;
35 /* Below is not needed for aura writes but AF driver needs it */
36 /* AF will translate to associated poolctx */
37 req->aura.pool_addr = req->aura_id;
/* Only fc_ena is touched; aura_mask selects that single field */
39 req->aura.fc_ena = enable;
40 req->aura_mask.fc_ena = 1;
42 rc = mbox_process(mbox);
46 /* Read back npa aura ctx */
47 req = mbox_alloc_msg_npa_aq_enq(mbox);
51 req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
52 req->ctype = NPA_AQ_CTYPE_AURA;
53 req->op = NPA_AQ_INSTOP_READ;
55 rc = mbox_process_msg(mbox, (void *)&rsp);
59 /* Init when enabled as there might be no triggers */
/*
 * Seed the flow-control word: current aura count when enabling,
 * otherwise report every SQB buffer as available.
 */
61 *(volatile uint64_t *)sq->fc = rsp->aura.count;
63 *(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
64 /* Sync write barrier */
/*
 * Validate a shaper profile and insert it into the per-nix profile list.
 *
 * @skip_ins: non-zero when the profile is already in the list (update
 *            path) — skips the duplicate-id rejection.
 *
 * NOTE(review): excerpt elides lines (return type, braces, the `id`
 * variable declaration, the NULL-rate branch and final return); only
 * the visible statements are documented.
 */
70 nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
71 struct nix_tm_shaper_profile *profile, int skip_ins)
73 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
74 uint64_t commit_rate, commit_sz;
75 uint64_t peak_rate, peak_sz;
79 commit_rate = profile->commit.rate;
80 commit_sz = profile->commit.size;
81 peak_rate = profile->peak.rate;
82 peak_sz = profile->peak.size;
/* Fresh insert must not collide with an existing profile id */
84 if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
85 return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;
/* Packet length adjustment must stay within the HW-supported window */
87 if (profile->pkt_len_adj < NIX_TM_LENGTH_ADJUST_MIN ||
88 profile->pkt_len_adj > NIX_TM_LENGTH_ADJUST_MAX)
89 return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
91 /* We cannot support both pkt length adjust and pkt mode */
92 if (profile->pkt_mode && profile->pkt_len_adj)
93 return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
95 /* commit rate and burst size can be enabled/disabled */
96 if (commit_rate || commit_sz) {
97 if (commit_sz < NIX_TM_MIN_SHAPER_BURST ||
98 commit_sz > NIX_TM_MAX_SHAPER_BURST)
99 return NIX_ERR_TM_INVALID_COMMIT_SZ;
/* Rate must be representable by the HW shaper (conv returns 0 if not) */
100 else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL,
102 return NIX_ERR_TM_INVALID_COMMIT_RATE;
105 /* Peak rate and burst size can be enabled/disabled */
106 if (peak_sz || peak_rate) {
107 if (peak_sz < NIX_TM_MIN_SHAPER_BURST ||
108 peak_sz > NIX_TM_MAX_SHAPER_BURST)
109 return NIX_ERR_TM_INVALID_PEAK_SZ;
110 else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL))
111 return NIX_ERR_TM_INVALID_PEAK_RATE;
/* All checks passed — profile joins the list (update path re-inserts
 * only in the elided portion; see skip_ins handling above).
 */
115 TAILQ_INSERT_TAIL(&nix->shaper_profile_list, profile, shaper);
117 plt_tm_dbg("Added TM shaper profile %u, "
118 " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
119 ", cbs %" PRIu64 " , adj %u, pkt_mode %u",
120 id, profile->peak.rate, profile->peak.size,
121 profile->commit.rate, profile->commit.size,
122 profile->pkt_len_adj, profile->pkt_mode);
124 /* Always use PIR for single rate shaping */
/* Move a commit-only configuration over to the peak (PIR) fields and
 * clear the commit side so HW programs a single-rate shaper via PIR.
 */
125 if (!peak_rate && commit_rate) {
126 profile->peak.rate = profile->commit.rate;
127 profile->peak.size = profile->commit.size;
128 profile->commit.rate = 0;
129 profile->commit.size = 0;
132 /* update min rate */
133 nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
/*
 * Public wrapper: translate a roc-level shaper profile into the internal
 * nix_tm_shaper_profile (stored in roc_profile->reserved) and add it.
 *
 * NOTE(review): excerpt elides lines (return type, braces, the `else`
 * line between the two rate assignments); comments cover only what is
 * visible.
 */
138 roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
139 struct roc_nix_tm_shaper_profile *roc_profile)
141 struct nix_tm_shaper_profile *profile;
/* Internal profile lives inside the caller-owned reserved area */
143 profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;
145 profile->ref_cnt = 0;
146 profile->id = roc_profile->id;
147 if (roc_profile->pkt_mode) {
148 /* Each packet accomulate single count, whereas HW
149 * considers each unit as Byte, so we need convert
/* pkt_mode path scales rates by 8 to match HW byte-based accounting;
 * the non-pkt_mode path (elided `else`) passes rates through unchanged.
 */
152 profile->commit.rate = roc_profile->commit_rate * 8;
153 profile->peak.rate = roc_profile->peak_rate * 8;
155 profile->commit.rate = roc_profile->commit_rate;
156 profile->peak.rate = roc_profile->peak_rate;
158 profile->commit.size = roc_profile->commit_sz;
159 profile->peak.size = roc_profile->peak_sz;
160 profile->pkt_len_adj = roc_profile->pkt_len_adj;
161 profile->pkt_mode = roc_profile->pkt_mode;
162 profile->free_fn = roc_profile->free_fn;
/* skip_ins = 0: fail if a profile with this id already exists */
164 return nix_tm_shaper_profile_add(roc_nix, profile, 0);
/*
 * Update rates/sizes of an existing shaper profile in place and re-run
 * validation. Mirrors roc_nix_tm_shaper_profile_add() but keeps id,
 * ref_cnt and the remaining fields untouched.
 *
 * NOTE(review): excerpt elides lines (return type, braces, the `else`
 * between the two rate assignments); comments cover visible code only.
 */
168 roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
169 struct roc_nix_tm_shaper_profile *roc_profile)
171 struct nix_tm_shaper_profile *profile;
173 profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;
175 if (roc_profile->pkt_mode) {
176 /* Each packet accomulate single count, whereas HW
177 * considers each unit as Byte, so we need convert
/* Same x8 pkt_mode scaling as the add path; elided `else` handles the
 * byte-mode passthrough.
 */
180 profile->commit.rate = roc_profile->commit_rate * 8;
181 profile->peak.rate = roc_profile->peak_rate * 8;
183 profile->commit.rate = roc_profile->commit_rate;
184 profile->peak.rate = roc_profile->peak_rate;
186 profile->commit.size = roc_profile->commit_sz;
187 profile->peak.size = roc_profile->peak_sz;
/* skip_ins = 1: profile is already linked, skip duplicate-id check */
189 return nix_tm_shaper_profile_add(roc_nix, profile, 1);
/*
 * Remove a shaper profile by id. Rejects unknown ids and profiles still
 * referenced by TM nodes, then unlinks, frees and refreshes the cached
 * minimum rate.
 *
 * NOTE(review): excerpt elides lines (return type, braces, the NULL
 * check on `profile` and the final return).
 */
193 roc_nix_tm_shaper_profile_delete(struct roc_nix *roc_nix, uint32_t id)
195 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
196 struct nix_tm_shaper_profile *profile;
198 profile = nix_tm_shaper_profile_search(nix, id);
200 return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
/* Profiles still attached to nodes cannot be deleted */
202 if (profile->ref_cnt)
203 return NIX_ERR_TM_SHAPER_PROFILE_IN_USE;
205 plt_tm_dbg("Removing TM shaper profile %u", id);
206 TAILQ_REMOVE(&nix->shaper_profile_list, profile, shaper);
207 nix_tm_shaper_profile_free(profile);
209 /* update min rate */
210 nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
/*
 * Public wrapper: copy user-supplied node parameters from the roc-level
 * node (internal node storage lives in roc_node->reserved) and add it
 * to the user hierarchy tree.
 *
 * NOTE(review): excerpt elides the return type and braces.
 */
215 roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
217 struct nix_tm_node *node;
219 node = (struct nix_tm_node *)&roc_node->reserved;
220 node->id = roc_node->id;
221 node->priority = roc_node->priority;
222 node->weight = roc_node->weight;
223 node->lvl = roc_node->lvl;
224 node->parent_id = roc_node->parent_id;
225 node->shaper_profile_id = roc_node->shaper_profile_id;
226 node->pkt_mode = roc_node->pkt_mode;
227 node->pkt_mode_set = roc_node->pkt_mode_set;
228 node->free_fn = roc_node->free_fn;
/* Nodes added via this API always belong to the user-defined tree */
229 node->tree = ROC_NIX_TM_USER;
231 return nix_tm_node_add(roc_nix, node);
/*
 * Change a user-tree node's pkt_mode (packet- vs byte-based accounting).
 * Refused once the node has children with quantum already committed, or
 * once the hierarchy is enabled with children present.
 *
 * NOTE(review): excerpt elides lines (return type, the pkt_mode
 * parameter line, braces, early returns and num_children++); comments
 * describe only the visible statements.
 */
235 roc_nix_tm_node_pkt_mode_update(struct roc_nix *roc_nix, uint32_t node_id,
238 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
239 struct nix_tm_node *node, *child;
240 struct nix_tm_node_list *list;
241 int num_children = 0;
243 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
245 return NIX_ERR_TM_INVALID_NODE;
/* No-op request: just latch that the mode was explicitly set */
247 if (node->pkt_mode == pkt_mode) {
248 node->pkt_mode_set = true;
252 /* Check for any existing children, if there are any,
253 * then we cannot update the pkt mode as children's quantum
254 * are already taken in.
256 list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
257 TAILQ_FOREACH(child, list, node) {
258 if (child->parent == node)
262 /* Cannot update mode if it has children or tree is enabled */
263 if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) && num_children)
/* A previously committed mode cannot be flipped under existing children */
266 if (node->pkt_mode_set && num_children)
267 return NIX_ERR_TM_PKT_MODE_MISMATCH;
269 node->pkt_mode = pkt_mode;
270 node->pkt_mode_set = true;
/*
 * Render a human-readable name ("SQ_<id>" for leaf SQ nodes, otherwise
 * "<hw-level>_<id>") for a user-tree node into the caller's buffer.
 * Writes "???" and returns an error for unknown node ids.
 *
 * NOTE(review): excerpt elides lines (return type, the buflen parameter
 * line, braces, node->id on the second snprintf, final return).
 */
276 roc_nix_tm_node_name_get(struct roc_nix *roc_nix, uint32_t node_id, char *buf,
279 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
280 struct nix_tm_node *node;
282 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
/* Unknown node: emit a placeholder so the buffer is never left stale */
284 plt_strlcpy(buf, "???", buflen);
285 return NIX_ERR_TM_INVALID_NODE;
/* NIX_TXSCH_LVL_CNT marks a leaf backed by an SQ rather than a TXSCH level */
288 if (node->hw_lvl == NIX_TXSCH_LVL_CNT)
289 snprintf(buf, buflen, "SQ_%d", node->id);
291 snprintf(buf, buflen, "%s_%d", nix_tm_hwlvl2str(node->hw_lvl),
297 roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
299 return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);