/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"
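/* Helper to fetch the mailbox handle kept in the NIX device's
 * underlying dev structure.
 */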
static inline struct mbox *
get_mbox(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;

	return dev->mbox;
}
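/* Report whether Rx channel backpressure is active: a non-zero cached
 * channel count means BPIDs were allocated by a prior enable.
 */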
static int
nix_fc_rxchan_bpid_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->chan_cnt != 0)
		fc_cfg->rxchan_cfg.enable = true;
	else
		fc_cfg->rxchan_cfg.enable = false;

	fc_cfg->type = ROC_NIX_FC_RXCHAN_CFG;

	return 0;
}
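/* Enable or disable backpressure on the Rx channels through the AF
 * mailbox and cache the BPIDs returned for each channel. On cn10k,
 * also mirror the setting to CPT when inline inbound IPsec is active.
 */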
static int
nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);
	struct nix_bp_cfg_req *req;
	struct nix_bp_cfg_rsp *rsp;
	int rc = -ENOSPC, i;

	if (enable) {
		req = mbox_alloc_msg_nix_bp_enable(mbox);
		if (req == NULL)
			return rc;
		req->chan_base = 0;

		if (roc_nix_is_lbk(roc_nix) || roc_nix_is_sdp(roc_nix))
			req->chan_cnt = NIX_LBK_MAX_CHAN;
		else
			req->chan_cnt = NIX_CGX_MAX_CHAN;

		req->bpid_per_chan = true;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc || (req->chan_cnt != rsp->chan_cnt))
			goto exit;

		nix->chan_cnt = rsp->chan_cnt;
		for (i = 0; i < rsp->chan_cnt; i++)
			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
	} else {
		req = mbox_alloc_msg_nix_bp_disable(mbox);
		if (req == NULL)
			return rc;
		req->chan_base = 0;
		req->chan_cnt = nix->chan_cnt;

		rc = mbox_process(mbox);
		if (rc)
			goto exit;

		memset(nix->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
		nix->chan_cnt = 0;
	}
	/* CPT backpressure config below applies only to cn10k */
	if (roc_model_is_cn9k())
		goto exit;

	/* Enable backpressure on CPT if inline inb is enabled */
	if (enable && roc_nix_inl_inb_is_enabled(roc_nix)) {
		req = mbox_alloc_msg_nix_cpt_bp_enable(mbox);
		if (req == NULL)
			return rc;
		req->chan_base = 0;
		req->chan_cnt = 1;
		req->bpid_per_chan = 0;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;
	} else {
		req = mbox_alloc_msg_nix_cpt_bp_disable(mbox);
		if (req == NULL)
			return rc;
		req->chan_base = 0;
		req->chan_cnt = 1;
		req->bpid_per_chan = 0;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;
	}

exit:
	return rc;
}
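/* Read the CQ context back over the AQ and extract the backpressure
 * drop level and enable bit. cn9k and cn10k use different AQ request
 * layouts, hence the two branches.
 */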
static int
nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	struct mbox *mbox = get_mbox(roc_nix);
	struct nix_aq_enq_rsp *rsp;
	int rc;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = fc_cfg->cq_cfg.rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_READ;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = fc_cfg->cq_cfg.rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_READ;
	}

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		goto exit;

	fc_cfg->cq_cfg.cq_drop = rsp->cq.bp;
	fc_cfg->cq_cfg.enable = rsp->cq.bp_ena;
	fc_cfg->type = ROC_NIX_FC_CQ_CFG;

exit:
	return rc;
}
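/* Program CQ backpressure: bind the CQ to the BPID of the requested
 * traffic class and set the drop level. Only the touched fields are
 * written, via the AQ context mask.
 */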
static int
nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = fc_cfg->cq_cfg.rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		if (fc_cfg->cq_cfg.enable) {
			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
			aq->cq_mask.bp = ~(aq->cq_mask.bp);
		}

		aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
		aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = fc_cfg->cq_cfg.rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		if (fc_cfg->cq_cfg.enable) {
			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
			aq->cq_mask.bp = ~(aq->cq_mask.bp);
		}

		aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
		aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
	}

	return mbox_process(mbox);
}
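/* Dispatch a flow control "get" request to the handler matching
 * fc_cfg->type. Served only on PF, LBK and SDP devices; other
 * devices return success without doing anything.
 */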
int
roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	if (!roc_nix_is_pf(roc_nix) && !roc_nix_is_lbk(roc_nix) &&
	    !roc_nix_is_sdp(roc_nix))
		return 0;

	if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
		return nix_fc_cq_config_get(roc_nix, fc_cfg);
	else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
		return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
		return nix_tm_bp_config_get(roc_nix, &fc_cfg->tm_cfg.enable);

	return -EINVAL;
}
int
roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	if (!roc_nix_is_pf(roc_nix) && !roc_nix_is_lbk(roc_nix) &&
	    !roc_nix_is_sdp(roc_nix))
		return 0;

	if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
		return nix_fc_cq_config_set(roc_nix, fc_cfg);
	else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
		return nix_fc_rxchan_bpid_set(roc_nix,
					      fc_cfg->rxchan_cfg.enable);
	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
					    fc_cfg->tm_cfg.tc,
					    fc_cfg->tm_cfg.enable);

	return -EINVAL;
}
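/* Query the 802.3x link-level pause state. LBK links have no CGX/RPM
 * behind them, so the cached state is returned directly; otherwise
 * the current state is fetched from CGX over the mailbox.
 */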
enum roc_nix_fc_mode
roc_nix_fc_mode_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);
	struct cgx_pause_frm_cfg *req, *rsp;
	enum roc_nix_fc_mode mode;
	int rc;

	/* Flow control on LBK link is always available */
	if (roc_nix_is_lbk(roc_nix)) {
		if (nix->tx_pause && nix->rx_pause)
			return ROC_NIX_FC_FULL;
		else if (nix->rx_pause)
			return ROC_NIX_FC_RX;
		else if (nix->tx_pause)
			return ROC_NIX_FC_TX;
		else
			return ROC_NIX_FC_NONE;
	}

	req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
	if (req == NULL)
		goto exit;
	req->set = 0;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		goto exit;

	if (rsp->rx_pause && rsp->tx_pause)
		mode = ROC_NIX_FC_FULL;
	else if (rsp->rx_pause)
		mode = ROC_NIX_FC_RX;
	else if (rsp->tx_pause)
		mode = ROC_NIX_FC_TX;
	else
		mode = ROC_NIX_FC_NONE;

	nix->rx_pause = rsp->rx_pause;
	nix->tx_pause = rsp->tx_pause;
	return mode;

exit:
	return ROC_NIX_FC_NONE;
}
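/* Apply an 802.3x pause mode: ROC_NIX_FC_FULL enables pause in both
 * directions, ROC_NIX_FC_RX and ROC_NIX_FC_TX one direction each.
 * LBK links only update the cached state.
 */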
int
roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);
	struct cgx_pause_frm_cfg *req;
	uint8_t tx_pause, rx_pause;
	int rc = -ENOSPC;

	rx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_RX);
	tx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_TX);

	/* Nothing much to do for LBK links */
	if (roc_nix_is_lbk(roc_nix)) {
		nix->rx_pause = rx_pause;
		nix->tx_pause = tx_pause;
		return 0;
	}

	req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
	if (req == NULL)
		goto exit;
	req->set = 1;
	req->rx_pause = rx_pause;
	req->tx_pause = tx_pause;

	rc = mbox_process(mbox);
	if (rc)
		goto exit;

	nix->rx_pause = rx_pause;
	nix->tx_pause = tx_pause;

exit:
	return rc;
}
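/* Map NIX backpressure onto an NPA aura: when the pool runs low, the
 * aura asserts backpressure toward this NIX. If the aura is already
 * bound to a different NIX/BPID, backpressure is disabled instead,
 * unless 'force' overrides the mismatch check.
 */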
void
rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
		      uint8_t force)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	struct mbox *mbox;
	uint32_t limit;
	int rc;

	if (roc_nix_is_sdp(roc_nix))
		return;

	if (!lf)
		return;
	mbox = lf->mbox;

	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return;

	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return;

	limit = rsp->aura.limit;
	/* BP is already enabled. */
	if (rsp->aura.bp_ena) {
		uint16_t bpid;
		bool nix1;

		nix1 = !!(rsp->aura.bp_ena & 0x2);
		if (nix1)
			bpid = rsp->aura.nix1_bpid;
		else
			bpid = rsp->aura.nix0_bpid;

		/* If BP ids don't match disable BP. */
		if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[0])) &&
		    !force) {
			req = mbox_alloc_msg_npa_aq_enq(mbox);
			if (req == NULL)
				return;

			req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
			req->ctype = NPA_AQ_CTYPE_AURA;
			req->op = NPA_AQ_INSTOP_WRITE;

			req->aura.bp_ena = 0;
			req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

			mbox_process(mbox);
		}
		return;
	}

	/* BP was previously enabled but now disabled skip. */
	if (rsp->aura.bp && !ena)
		return;

	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return;

	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;

	if (ena) {
		if (nix->is_nix1) {
			req->aura.nix1_bpid = nix->bpid[0];
			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
		} else {
			req->aura.nix0_bpid = nix->bpid[0];
			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
		}
		req->aura.bp = NIX_RQ_AURA_THRESH(
			limit > 128 ? 256 : limit); /* 95% of size */
		req->aura_mask.bp = ~(req->aura_mask.bp);
	}

	req->aura.bp_ena = (!!ena << nix->is_nix1);
	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

	mbox_process(mbox);
}
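/* Configure priority flow control (PFC) on the CGX/RPM link.
 * pfc_cfg->tc carries the traffic class selection and the mode picks
 * the Rx/Tx pause directions; the enabled-TC state is cached in
 * nix->cev. Not supported on LBK.
 */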
int
roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);
	uint8_t tx_pause, rx_pause;
	struct cgx_pfc_cfg *req;
	struct cgx_pfc_rsp *rsp;
	int rc = -ENOSPC;

	if (roc_nix_is_lbk(roc_nix))
		return NIX_ERR_OP_NOTSUP;

	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
		   (pfc_cfg->mode == ROC_NIX_FC_RX);
	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
		   (pfc_cfg->mode == ROC_NIX_FC_TX);

	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
	if (req == NULL)
		goto exit;

	req->pfc_en = pfc_cfg->tc;
	req->rx_pause = rx_pause;
	req->tx_pause = tx_pause;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		goto exit;

	nix->rx_pause = rsp->rx_pause;
	nix->tx_pause = rsp->tx_pause;
	if (rsp->tx_pause)
		nix->cev |= BIT(pfc_cfg->tc);
	else
		nix->cev &= ~BIT(pfc_cfg->tc);

exit:
	return rc;
}
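/* Report the cached PFC state: enabled TC bits and pause mode. */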
int
roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (roc_nix_is_lbk(roc_nix))
		return NIX_ERR_OP_NOTSUP;

	pfc_cfg->tc = nix->cev;

	if (nix->rx_pause && nix->tx_pause)
		pfc_cfg->mode = ROC_NIX_FC_FULL;
	else if (nix->rx_pause)
		pfc_cfg->mode = ROC_NIX_FC_RX;
	else if (nix->tx_pause)
		pfc_cfg->mode = ROC_NIX_FC_TX;
	else
		pfc_cfg->mode = ROC_NIX_FC_NONE;

	return 0;
}
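/* Number of Rx channels for which backpressure BPIDs are allocated. */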
uint16_t
roc_nix_chan_count_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->chan_cnt;
}