1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/*
 * Resolve the AF mailbox handle for a roc_nix: map the public roc_nix
 * handle to the private struct nix, then take the mbox from its dev.
 * NOTE(review): this listing is elided — the opening brace and the
 * return statement of this helper are not visible here.
 */
8 static inline struct mbox *
9 get_mbox(struct roc_nix *roc_nix)
11 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
12 struct dev *dev = &nix->dev;
/*
 * Report whether RX-channel backpressure is currently configured.
 * A non-zero cached chan_cnt (populated by the bpid_set path) means
 * BP IDs were assigned, so enable is reported as true.
 * NOTE(review): elided listing — the `else` keyword for the
 * enable=false branch and the return statement are not visible.
 */
18 nix_fc_rxchan_bpid_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
20 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
22 if (nix->chan_cnt != 0)
23 fc_cfg->rxchan_cfg.enable = true;
/* (elided else) no cached channels -> BP reported disabled */
25 fc_cfg->rxchan_cfg.enable = false;
/* Tag the result so callers know which union member is valid. */
27 fc_cfg->type = ROC_NIX_FC_RXCHAN_CFG;
/*
 * Enable or disable NIX RX-channel backpressure via the AF mailbox.
 * Enable: request one BP ID per channel (LBK vs CGX channel count),
 * then cache the returned count and the 9-bit BP IDs in struct nix.
 * Disable: send bp_disable for the cached count and clear the cache.
 * On non-CN9K models, CPT backpressure is additionally enabled when
 * inline inbound is active, or disabled otherwise.
 * NOTE(review): elided listing — allocation NULL checks, rc checks,
 * braces, `else` keywords and return statements are not visible here.
 */
33 nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
35 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
36 struct mbox *mbox = get_mbox(roc_nix);
37 struct nix_bp_cfg_req *req;
38 struct nix_bp_cfg_rsp *rsp;
/* SDP interfaces take no CGX/LBK BP config (early exit elided). */
41 if (roc_nix_is_sdp(roc_nix))
45 req = mbox_alloc_msg_nix_bp_enable(mbox);
/* LBK and CGX links have different maximum channel counts. */
50 if (roc_nix_is_lbk(roc_nix))
51 req->chan_cnt = NIX_LBK_MAX_CHAN;
53 req->chan_cnt = NIX_CGX_MAX_CHAN;
/* Request a distinct BP ID for every channel. */
55 req->bpid_per_chan = true;
57 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Treat a partial grant (count mismatch) as failure. */
58 if (rc || (req->chan_cnt != rsp->chan_cnt))
61 nix->chan_cnt = rsp->chan_cnt;
/* 0x1FF keeps only the low 9 bits of each returned BP ID. */
62 for (i = 0; i < rsp->chan_cnt; i++)
63 nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
/* Disable path: release BP for every channel enabled earlier. */
65 req = mbox_alloc_msg_nix_bp_disable(mbox);
69 req->chan_cnt = nix->chan_cnt;
71 rc = mbox_process(mbox);
/* Wipe the cached BP IDs once the AF has disabled them. */
75 memset(nix->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
/* CPT BP handling below is skipped on CN9K-class hardware. */
79 if (roc_model_is_cn9k())
82 /* Enable backpressure on CPT if inline inb is enabled */
83 if (enable && roc_nix_inl_inb_is_enabled(roc_nix)) {
84 req = mbox_alloc_msg_nix_cpt_bp_enable(mbox);
/* One BP ID for the whole CPT link, not per channel. */
89 req->bpid_per_chan = 0;
91 rc = mbox_process_msg(mbox, (void *)&rsp);
95 req = mbox_alloc_msg_nix_cpt_bp_disable(mbox);
100 req->bpid_per_chan = 0;
102 rc = mbox_process_msg(mbox, (void *)&rsp);
/*
 * Read a CQ's backpressure settings (bp level and bp_ena) from the
 * hardware context via an AQ READ, and fill fc_cfg->cq_cfg with them.
 * CN9K and CN10K use different AQ request message types but the same
 * qidx/ctype/op fields; fc_cfg->cq_cfg.rq selects the queue index.
 * NOTE(review): elided listing — allocation NULL checks, rc check and
 * return statements are not visible here.
 */
112 nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
114 struct mbox *mbox = get_mbox(roc_nix);
115 struct nix_aq_enq_rsp *rsp;
118 if (roc_model_is_cn9k()) {
119 struct nix_aq_enq_req *aq;
121 aq = mbox_alloc_msg_nix_aq_enq(mbox);
125 aq->qidx = fc_cfg->cq_cfg.rq;
126 aq->ctype = NIX_AQ_CTYPE_CQ;
127 aq->op = NIX_AQ_INSTOP_READ;
/* CN10K path: same read, different request layout. */
129 struct nix_cn10k_aq_enq_req *aq;
131 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
135 aq->qidx = fc_cfg->cq_cfg.rq;
136 aq->ctype = NIX_AQ_CTYPE_CQ;
137 aq->op = NIX_AQ_INSTOP_READ;
140 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Copy the BP level and enable bit out of the CQ context. */
144 fc_cfg->cq_cfg.cq_drop = rsp->cq.bp;
145 fc_cfg->cq_cfg.enable = rsp->cq.bp_ena;
146 fc_cfg->type = ROC_NIX_FC_CQ_CFG;
/*
 * Write a CQ's backpressure settings via an AQ WRITE: when enabling,
 * program the BP ID (from the cached per-TC table) and the drop/BP
 * level; always program bp_ena.  Each written field flips the matching
 * cq_mask bit pattern with ~ so the AQ updates only those fields —
 * assumes the allocated message (and thus cq_mask) starts zeroed;
 * TODO(review) confirm the mbox allocator zero-fills.
 * NOTE(review): elided listing — NULL checks, braces and the CN9K/
 * CN10K `else` keyword are not visible here.
 */
153 nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
155 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
156 struct mbox *mbox = get_mbox(roc_nix);
158 if (roc_model_is_cn9k()) {
159 struct nix_aq_enq_req *aq;
161 aq = mbox_alloc_msg_nix_aq_enq(mbox);
165 aq->qidx = fc_cfg->cq_cfg.rq;
166 aq->ctype = NIX_AQ_CTYPE_CQ;
167 aq->op = NIX_AQ_INSTOP_WRITE;
169 if (fc_cfg->cq_cfg.enable) {
/* BP ID comes from the table indexed by traffic class. */
170 aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
171 aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
172 aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
173 aq->cq_mask.bp = ~(aq->cq_mask.bp);
176 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
177 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
/* CN10K path mirrors the CN9K writes with its own request type. */
179 struct nix_cn10k_aq_enq_req *aq;
181 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
185 aq->qidx = fc_cfg->cq_cfg.rq;
186 aq->ctype = NIX_AQ_CTYPE_CQ;
187 aq->op = NIX_AQ_INSTOP_WRITE;
189 if (fc_cfg->cq_cfg.enable) {
190 aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
191 aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
192 aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
193 aq->cq_mask.bp = ~(aq->cq_mask.bp);
196 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
197 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
/* Fire the AQ write; mbox_process returns the AF's status. */
200 return mbox_process(mbox);
/*
 * Public getter: dispatch on fc_cfg->type to the CQ, RX-channel or TM
 * backpressure query helpers.  VF/SDP devices without an LBK link are
 * rejected (the rejection return line is elided in this listing), as
 * is the fall-through for unknown types.
 */
204 roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
206 if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
209 if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
210 return nix_fc_cq_config_get(roc_nix, fc_cfg);
211 else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
212 return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
213 else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
214 return nix_tm_bp_config_get(roc_nix, &fc_cfg->tm_cfg.enable);
/*
 * Public setter: dispatch on fc_cfg->type to the CQ, RX-channel or TM
 * backpressure configuration helpers.  Mirrors roc_nix_fc_config_get;
 * VF/SDP devices without an LBK link are rejected (rejection return
 * and unknown-type fall-through are elided in this listing).
 */
220 roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
222 if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
225 if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
226 return nix_fc_cq_config_set(roc_nix, fc_cfg);
227 else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
228 return nix_fc_rxchan_bpid_set(roc_nix,
229 fc_cfg->rxchan_cfg.enable);
230 else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
231 return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
233 fc_cfg->tm_cfg.enable);
/*
 * Return the current 802.3x flow-control mode.  LBK links answer from
 * the cached rx_pause/tx_pause state; physical links query the CGX
 * pause-frame config over the mailbox, refresh the cache, and map the
 * rx/tx pause bits to a ROC_NIX_FC_* mode.
 * NOTE(review): elided listing — NULL check, rc check, the `return
 * mode;` on success, and the label before the final fallback are not
 * visible; the trailing ROC_NIX_FC_NONE is presumably the error-path
 * return — confirm against the full source.
 */
239 roc_nix_fc_mode_get(struct roc_nix *roc_nix)
241 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
242 struct mbox *mbox = get_mbox(roc_nix);
243 struct cgx_pause_frm_cfg *req, *rsp;
244 enum roc_nix_fc_mode mode;
247 /* Flow control on LBK link is always available */
248 if (roc_nix_is_lbk(roc_nix)) {
249 if (nix->tx_pause && nix->rx_pause)
250 return ROC_NIX_FC_FULL;
251 else if (nix->rx_pause)
252 return ROC_NIX_FC_RX;
253 else if (nix->tx_pause)
254 return ROC_NIX_FC_TX;
256 return ROC_NIX_FC_NONE;
/* Physical link: ask CGX for the live pause-frame state. */
259 req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
264 rc = mbox_process_msg(mbox, (void *)&rsp);
268 if (rsp->rx_pause && rsp->tx_pause)
269 mode = ROC_NIX_FC_FULL;
270 else if (rsp->rx_pause)
271 mode = ROC_NIX_FC_RX;
272 else if (rsp->tx_pause)
273 mode = ROC_NIX_FC_TX;
275 mode = ROC_NIX_FC_NONE;
/* Keep the cache in sync with what CGX reported. */
277 nix->rx_pause = rsp->rx_pause;
278 nix->tx_pause = rsp->tx_pause;
282 return ROC_NIX_FC_NONE;
/*
 * Program the 802.3x flow-control mode.  Decodes the mode into rx/tx
 * pause bits; LBK links only update the cached state (no hardware to
 * program), physical links send a CGX pause-frame config over the
 * mailbox and cache the new state on success.
 * NOTE(review): elided listing — NULL check, the CGX `set` flag
 * assignment, rc check and return statements are not visible here.
 */
286 roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
288 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
289 struct mbox *mbox = get_mbox(roc_nix);
290 struct cgx_pause_frm_cfg *req;
291 uint8_t tx_pause, rx_pause;
/* FULL implies both directions; RX/TX select one each. */
294 rx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_RX);
295 tx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_TX);
297 /* Nothing much to do for LBK links */
298 if (roc_nix_is_lbk(roc_nix)) {
299 nix->rx_pause = rx_pause;
300 nix->tx_pause = tx_pause;
304 req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
308 req->rx_pause = rx_pause;
309 req->tx_pause = tx_pause;
311 rc = mbox_process(mbox);
/* Cache only after the AF accepted the new configuration. */
315 nix->rx_pause = rx_pause;
316 nix->tx_pause = tx_pause;
/*
 * Configure NPA aura-level backpressure for the pool behind a nix RQ.
 * Reads the aura context via an NPA AQ READ; if BP is already enabled
 * it validates the recorded nix side (nix0/nix1) and BP ID against
 * this nix and clears a mismatching config, otherwise it writes the
 * cached BP ID, an occupancy threshold, and the bp_ena bit.
 * NOTE(review): the "rox_" prefix differs from the "roc_" prefix used
 * by every sibling API here — looks like a typo, but renaming would
 * break callers; confirm against the public header before changing.
 * NOTE(review): elided listing — the signature continues past the
 * visible trailing comma, and `mbox` used below is declared on an
 * elided line (presumably from the idev NPA lf); verify.
 */
323 rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
326 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
327 struct npa_lf *lf = idev_npa_obj_get();
328 struct npa_aq_enq_req *req;
329 struct npa_aq_enq_rsp *rsp;
/* SDP interfaces take no aura BP config (early exit elided). */
334 if (roc_nix_is_sdp(roc_nix))
/* Step 1: read the current aura context. */
341 req = mbox_alloc_msg_npa_aq_enq(mbox);
345 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
346 req->ctype = NPA_AQ_CTYPE_AURA;
347 req->op = NPA_AQ_INSTOP_READ;
349 rc = mbox_process_msg(mbox, (void *)&rsp);
353 limit = rsp->aura.limit;
354 /* BP is already enabled. */
355 if (rsp->aura.bp_ena) {
/* bp_ena bit 1 set means the aura backpressures nix1. */
359 nix1 = !!(rsp->aura.bp_ena & 0x2);
361 bpid = rsp->aura.nix1_bpid;
363 bpid = rsp->aura.nix0_bpid;
365 /* If BP ids don't match disable BP. */
366 if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[0])) &&
368 req = mbox_alloc_msg_npa_aq_enq(mbox);
372 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
373 req->ctype = NPA_AQ_CTYPE_AURA;
374 req->op = NPA_AQ_INSTOP_WRITE;
/* Clear only bp_ena; the ~mask flip selects that one field. */
376 req->aura.bp_ena = 0;
377 req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
384 /* BP was previously enabled but now disabled skip. */
/* Step 2: write the BP ID, threshold and enable bit. */
388 req = mbox_alloc_msg_npa_aq_enq(mbox);
392 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
393 req->ctype = NPA_AQ_CTYPE_AURA;
394 req->op = NPA_AQ_INSTOP_WRITE;
/* Channel-0 BP ID goes to the nix0 or nix1 field as appropriate. */
398 req->aura.nix1_bpid = nix->bpid[0];
399 req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
401 req->aura.nix0_bpid = nix->bpid[0];
402 req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
/* Threshold derived from the aura limit, capped at 256. */
404 req->aura.bp = NIX_RQ_AURA_THRESH(
405 limit > 128 ? 256 : limit); /* 95% of size*/
406 req->aura_mask.bp = ~(req->aura_mask.bp);
/* Set bit 0 (nix0) or bit 1 (nix1) depending on which nix this is. */
409 req->aura.bp_ena = (!!ena << nix->is_nix1);
410 req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
/*
 * Configure priority flow control (PFC) on the CGX link: decode the
 * requested mode into rx/tx pause bits, send the per-priority enable
 * set (pfc_cfg->tc, presumably a TC bitmap given it is assigned to
 * pfc_en — TODO confirm) to CGX, then cache the pause state and track
 * enabled classes in nix->cev.  Not supported on LBK links.
 * NOTE(review): elided listing — NULL check, rc check, the condition
 * selecting between the cev set/clear lines, and the returns are not
 * visible here.
 */
416 roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
418 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
419 struct mbox *mbox = get_mbox(roc_nix);
420 uint8_t tx_pause, rx_pause;
421 struct cgx_pfc_cfg *req;
422 struct cgx_pfc_rsp *rsp;
425 if (roc_nix_is_lbk(roc_nix))
426 return NIX_ERR_OP_NOTSUP;
428 rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
429 (pfc_cfg->mode == ROC_NIX_FC_RX);
430 tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
431 (pfc_cfg->mode == ROC_NIX_FC_TX);
433 req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
437 req->pfc_en = pfc_cfg->tc;
438 req->rx_pause = rx_pause;
439 req->tx_pause = tx_pause;
441 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Cache what CGX actually applied, not what was requested. */
445 nix->rx_pause = rsp->rx_pause;
446 nix->tx_pause = rsp->tx_pause;
/* cev tracks the classes with PFC enabled (condition elided). */
448 nix->cev |= BIT(pfc_cfg->tc);
450 nix->cev &= ~BIT(pfc_cfg->tc);
/*
 * Report the cached PFC state: the cev class bitmap and a mode derived
 * from the cached rx/tx pause flags.  Purely a cache read — no mailbox
 * traffic.  Not supported on LBK links.
 * NOTE(review): elided listing — the `else` before the FC_NONE line
 * and the return statement are not visible here.
 */
457 roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
459 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
461 if (roc_nix_is_lbk(roc_nix))
462 return NIX_ERR_OP_NOTSUP;
464 pfc_cfg->tc = nix->cev;
466 if (nix->rx_pause && nix->tx_pause)
467 pfc_cfg->mode = ROC_NIX_FC_FULL;
468 else if (nix->rx_pause)
469 pfc_cfg->mode = ROC_NIX_FC_RX;
470 else if (nix->tx_pause)
471 pfc_cfg->mode = ROC_NIX_FC_TX;
473 pfc_cfg->mode = ROC_NIX_FC_NONE;
/*
 * Trivial getter: return the cached RX channel count recorded when
 * backpressure BP IDs were assigned (see nix_fc_rxchan_bpid_set).
 */
479 roc_nix_chan_count_get(struct roc_nix *roc_nix)
481 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
483 return nix->chan_cnt;