/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

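/* Resolve the mailbox handle of the device backing this NIX. */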
static inline struct mbox *
get_mbox(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct dev *dev = &nix->dev;

	return dev->mbox;
}

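/* Report whether RX channel backpressure is enabled; a non-zero cached
 * channel count means backpressure IDs have been allocated for this NIX.
 */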
static int
nix_fc_rxchan_bpid_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->chan_cnt != 0)
		fc_cfg->rxchan_cfg.enable = true;
	else
		fc_cfg->rxchan_cfg.enable = false;

	fc_cfg->type = ROC_NIX_FC_RXCHAN_CFG;

	return 0;
}

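/* Enable or disable backpressure on the RX channel via mailbox and cache the
 * BPID returned by the AF. On non-cn9k models the setting is also propagated
 * to CPT when inline inbound processing is enabled.
 */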
static int
nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);
	struct nix_bp_cfg_req *req;
	struct nix_bp_cfg_rsp *rsp;
	int rc = -ENOSPC;

	if (roc_nix_is_sdp(roc_nix))
		return 0;

	if (enable) {
		req = mbox_alloc_msg_nix_bp_enable(mbox);
		if (req == NULL)
			return rc;
		req->chan_base = 0;
		req->chan_cnt = 1;
		req->bpid_per_chan = 0;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc || (req->chan_cnt != rsp->chan_cnt))
			goto exit;

		nix->bpid[0] = rsp->chan_bpid[0];
		nix->chan_cnt = rsp->chan_cnt;
	} else {
		req = mbox_alloc_msg_nix_bp_disable(mbox);
		if (req == NULL)
			return rc;
		req->chan_base = 0;
		req->chan_cnt = 1;

		rc = mbox_process(mbox);
		if (rc)
			goto exit;

		memset(nix->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
		nix->chan_cnt = 0;
	}

	if (roc_model_is_cn9k())
		goto exit;

	/* Enable backpressure on CPT if inline inb is enabled */
	if (enable && roc_nix_inl_inb_is_enabled(roc_nix)) {
		req = mbox_alloc_msg_nix_cpt_bp_enable(mbox);
		if (req == NULL)
			return rc;
		req->chan_base = 0;
		req->chan_cnt = 1;
		req->bpid_per_chan = 0;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;
	} else {
		req = mbox_alloc_msg_nix_cpt_bp_disable(mbox);
		if (req == NULL)
			return rc;
		req->chan_base = 0;
		req->chan_cnt = 1;
		req->bpid_per_chan = 0;

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto exit;
	}

exit:
	return rc;
}

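/* Read the CQ context through the AQ mailbox and return its current
 * backpressure level and enable state.
 */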
static int
nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	struct mbox *mbox = get_mbox(roc_nix);
	struct nix_aq_enq_rsp *rsp;
	int rc;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = fc_cfg->cq_cfg.rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_READ;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = fc_cfg->cq_cfg.rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_READ;
	}

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		goto exit;

	fc_cfg->cq_cfg.cq_drop = rsp->cq.bp;
	fc_cfg->cq_cfg.enable = rsp->cq.bp_ena;
	fc_cfg->type = ROC_NIX_FC_CQ_CFG;

exit:
	return rc;
}

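/* Write the BPID, drop level and backpressure enable bit into the CQ context
 * through the AQ mailbox.
 */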
static int
nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = fc_cfg->cq_cfg.rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		if (fc_cfg->cq_cfg.enable) {
			aq->cq.bpid = nix->bpid[0];
			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
			aq->cq_mask.bp = ~(aq->cq_mask.bp);
		}

		aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
		aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = fc_cfg->cq_cfg.rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		if (fc_cfg->cq_cfg.enable) {
			aq->cq.bpid = nix->bpid[0];
			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
			aq->cq_mask.bp = ~(aq->cq_mask.bp);
		}

		aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
		aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
	}

	return mbox_process(mbox);
}

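/* Dispatch a flow control configuration read based on the requested type. */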
int
roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
		return 0;

	if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
		return nix_fc_cq_config_get(roc_nix, fc_cfg);
	else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
		return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
		return nix_tm_bp_config_get(roc_nix, &fc_cfg->tm_cfg.enable);

	return -EINVAL;
}

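/* Dispatch a flow control configuration update based on the requested type. */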
int
roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
{
	if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
		return 0;

	if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
		return nix_fc_cq_config_set(roc_nix, fc_cfg);
	else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
		return nix_fc_rxchan_bpid_set(roc_nix,
					      fc_cfg->rxchan_cfg.enable);
	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);

	return -EINVAL;
}

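/* Query the current flow control (pause frame) mode. LBK links report the
 * cached SW state; other links query the CGX pause frame config via mailbox.
 */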
enum roc_nix_fc_mode
roc_nix_fc_mode_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);
	struct cgx_pause_frm_cfg *req, *rsp;
	enum roc_nix_fc_mode mode;
	int rc;

	/* Flow control on LBK link is always available */
	if (roc_nix_is_lbk(roc_nix)) {
		if (nix->tx_pause && nix->rx_pause)
			return ROC_NIX_FC_FULL;
		else if (nix->rx_pause)
			return ROC_NIX_FC_RX;
		else if (nix->tx_pause)
			return ROC_NIX_FC_TX;
		else
			return ROC_NIX_FC_NONE;
	}

	req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
	if (req == NULL)
		goto exit;
	req->set = 0;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		goto exit;

	if (rsp->rx_pause && rsp->tx_pause)
		mode = ROC_NIX_FC_FULL;
	else if (rsp->rx_pause)
		mode = ROC_NIX_FC_RX;
	else if (rsp->tx_pause)
		mode = ROC_NIX_FC_TX;
	else
		mode = ROC_NIX_FC_NONE;

	nix->rx_pause = rsp->rx_pause;
	nix->tx_pause = rsp->tx_pause;
	return mode;

exit:
	return ROC_NIX_FC_NONE;
}

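/* Apply the requested flow control mode. For LBK links only the cached state
 * is updated; otherwise the CGX pause frame configuration is sent via mailbox.
 */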
int
roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = get_mbox(roc_nix);
	struct cgx_pause_frm_cfg *req;
	uint8_t tx_pause, rx_pause;
	int rc = -ENOSPC;

	rx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_RX);
	tx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_TX);

	/* Nothing much to do for LBK links */
	if (roc_nix_is_lbk(roc_nix)) {
		nix->rx_pause = rx_pause;
		nix->tx_pause = tx_pause;
		return 0;
	}

	req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
	if (req == NULL)
		goto exit;
	req->set = 1;
	req->rx_pause = rx_pause;
	req->tx_pause = tx_pause;

	rc = mbox_process(mbox);
	if (rc)
		goto exit;

	nix->rx_pause = rx_pause;
	nix->tx_pause = tx_pause;

exit:
	return rc;
}

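/* Configure NPA aura backpressure for the pool backing an RQ: read the aura
 * context, disable backpressure if a stale BPID is found, otherwise program
 * this NIX's BPID and a drop threshold derived from the aura limit.
 */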
void
rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
		      uint8_t force)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	struct mbox *mbox;
	uint64_t limit;
	int rc;

	if (roc_nix_is_sdp(roc_nix))
		return;

	if (!lf)
		return;
	mbox = lf->mbox;

	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return;

	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return;

	limit = rsp->aura.limit;
	/* BP is already enabled. */
	if (rsp->aura.bp_ena) {
		uint16_t bpid;
		bool nix1;

		nix1 = !!(rsp->aura.bp_ena & 0x2);
		if (nix1)
			bpid = rsp->aura.nix1_bpid;
		else
			bpid = rsp->aura.nix0_bpid;

		/* If BP ids don't match disable BP. */
		if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[0])) &&
		    !force) {
			req = mbox_alloc_msg_npa_aq_enq(mbox);
			if (req == NULL)
				return;

			req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
			req->ctype = NPA_AQ_CTYPE_AURA;
			req->op = NPA_AQ_INSTOP_WRITE;

			req->aura.bp_ena = 0;
			req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

			mbox_process(mbox);
		}
		return;
	}

	/* BP was previously enabled but is now disabled, skip. */
	if (rsp->aura.bp && !ena)
		return;

	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return;

	req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;

	if (ena) {
		if (nix->is_nix1) {
			req->aura.nix1_bpid = nix->bpid[0];
			req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
		} else {
			req->aura.nix0_bpid = nix->bpid[0];
			req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
		}
		req->aura.bp = NIX_RQ_AURA_THRESH(
			limit > 128 ? 256 : limit); /* 95% of size */
		req->aura_mask.bp = ~(req->aura_mask.bp);
	}

	req->aura.bp_ena = (!!ena << nix->is_nix1);
	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

	mbox_process(mbox);
}