1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Resolve the AF mailbox handle for @roc_nix via its private nix/dev
 * state. NOTE(review): the return statement is outside this extracted
 * view — presumably returns dev->mbox; confirm against the full file.
 */
8 static inline struct mbox *
9 get_mbox(struct roc_nix *roc_nix)
11 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
12 struct dev *dev = &nix->dev;
/* Report whether RX-channel backpressure is currently configured.
 * Purely reads cached SW state: a non-zero chan_cnt (set when BPIDs
 * were obtained from the AF) means backpressure is enabled.
 * Fills fc_cfg->rxchan_cfg.enable and tags fc_cfg->type accordingly.
 */
18 nix_fc_rxchan_bpid_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
20 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
22 if (nix->chan_cnt != 0)
23 fc_cfg->rxchan_cfg.enable = true;
/* else-branch target (chan_cnt == 0): no BPIDs held, report disabled */
25 fc_cfg->rxchan_cfg.enable = false;
27 fc_cfg->type = ROC_NIX_FC_RXCHAN_CFG;
/* Enable or disable RX-channel backpressure (BPID allocation) via the
 * AF mailbox, and mirror the result into cached SW state.
 * Not applicable on SDP interfaces (early bail-out below).
 * NOTE(review): several interior lines (error paths, returns, closing
 * braces) fall outside this extracted view.
 */
33 nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
35 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
36 struct mbox *mbox = get_mbox(roc_nix);
37 struct nix_bp_cfg_req *req;
38 struct nix_bp_cfg_rsp *rsp;
/* SDP has no CGX-style backpressure path — skip entirely */
41 if (roc_nix_is_sdp(roc_nix))
/* --- enable path: request BP enable; one BPID for all channels --- */
45 req = mbox_alloc_msg_nix_bp_enable(mbox);
50 req->bpid_per_chan = 0;
52 rc = mbox_process_msg(mbox, (void *)&rsp);
/* AF must ack the same channel count we asked for */
53 if (rc || (req->chan_cnt != rsp->chan_cnt))
/* Cache the granted BPID and channel count for later CQ/aura config */
56 nix->bpid[0] = rsp->chan_bpid[0];
57 nix->chan_cnt = rsp->chan_cnt;
/* --- disable path: release BPIDs and clear the cache --- */
59 req = mbox_alloc_msg_nix_bp_disable(mbox);
65 rc = mbox_process(mbox);
69 memset(nix->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
/* CN9K has no CPT backpressure mailbox — done at this point */
73 if (roc_model_is_cn9k())
76 /* Enable backpressure on CPT if inline inb is enabled */
77 if (enable && roc_nix_inl_inb_is_enabled(roc_nix)) {
78 req = mbox_alloc_msg_nix_cpt_bp_enable(mbox);
83 req->bpid_per_chan = 0;
85 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Otherwise make sure CPT backpressure is off */
89 req = mbox_alloc_msg_nix_cpt_bp_disable(mbox);
94 req->bpid_per_chan = 0;
96 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Read the backpressure fields of a CQ context via an AQ READ and
 * report them in @fc_cfg. The request struct differs per silicon
 * generation (CN9K vs CN10K) but both issue the same READ op; the
 * response layout is shared, so rsp->cq is decoded once below.
 */
106 nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
108 struct mbox *mbox = get_mbox(roc_nix);
109 struct nix_aq_enq_rsp *rsp;
112 if (roc_model_is_cn9k()) {
113 struct nix_aq_enq_req *aq;
115 aq = mbox_alloc_msg_nix_aq_enq(mbox);
116 aq->qidx = fc_cfg->cq_cfg.rq;
117 aq->ctype = NIX_AQ_CTYPE_CQ;
118 aq->op = NIX_AQ_INSTOP_READ;
/* CN10K uses its own AQ request type but the same qidx/ctype/op */
120 struct nix_cn10k_aq_enq_req *aq;
122 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
123 aq->qidx = fc_cfg->cq_cfg.rq;
124 aq->ctype = NIX_AQ_CTYPE_CQ;
125 aq->op = NIX_AQ_INSTOP_READ;
128 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Translate HW CQ context fields into the generic fc_cfg view */
132 fc_cfg->cq_cfg.cq_drop = rsp->cq.bp;
133 fc_cfg->cq_cfg.enable = rsp->cq.bp_ena;
134 fc_cfg->type = ROC_NIX_FC_CQ_CFG;
/* Program CQ backpressure via an AQ WRITE: when enabling, set the
 * cached BPID and the drop level; in both cases write bp_ena.
 * The ~(mask) = all-ones idiom marks each field as valid for the
 * masked AQ write. CN9K and CN10K branches are structurally identical
 * apart from the request struct type.
 */
141 nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
143 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
144 struct mbox *mbox = get_mbox(roc_nix);
146 if (roc_model_is_cn9k()) {
147 struct nix_aq_enq_req *aq;
149 aq = mbox_alloc_msg_nix_aq_enq(mbox);
150 aq->qidx = fc_cfg->cq_cfg.rq;
151 aq->ctype = NIX_AQ_CTYPE_CQ;
152 aq->op = NIX_AQ_INSTOP_WRITE;
154 if (fc_cfg->cq_cfg.enable) {
/* BPID comes from the cache filled by nix_fc_rxchan_bpid_set() */
155 aq->cq.bpid = nix->bpid[0];
156 aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
157 aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
158 aq->cq_mask.bp = ~(aq->cq_mask.bp);
/* bp_ena is always written so disable also takes effect */
161 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
162 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
/* CN10K path: same programming via the cn10k request struct */
164 struct nix_cn10k_aq_enq_req *aq;
166 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
167 aq->qidx = fc_cfg->cq_cfg.rq;
168 aq->ctype = NIX_AQ_CTYPE_CQ;
169 aq->op = NIX_AQ_INSTOP_WRITE;
171 if (fc_cfg->cq_cfg.enable) {
172 aq->cq.bpid = nix->bpid[0];
173 aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
174 aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
175 aq->cq_mask.bp = ~(aq->cq_mask.bp);
178 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
179 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
182 return mbox_process(mbox);
/* Public getter: dispatch on fc_cfg->type to the matching per-type
 * reader (CQ context, RX-channel BPID state, or TM backpressure).
 * VF/SDP interfaces (except LBK) have no flow-control config to read
 * — the guard below bails out for them.
 */
186 roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
188 if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
191 if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
192 return nix_fc_cq_config_get(roc_nix, fc_cfg);
193 else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
194 return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
195 else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
196 return nix_tm_bp_config_get(roc_nix, &fc_cfg->tm_cfg.enable);
/* Public setter: mirror of roc_nix_fc_config_get() — dispatch on
 * fc_cfg->type to the matching per-type writer. Same VF/SDP (non-LBK)
 * guard as the getter.
 */
202 roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
204 if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
207 if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
208 return nix_fc_cq_config_set(roc_nix, fc_cfg);
209 else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
210 return nix_fc_rxchan_bpid_set(roc_nix,
211 fc_cfg->rxchan_cfg.enable);
212 else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
213 return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
/* Return the current pause-frame mode (NONE/RX/TX/FULL).
 * LBK links answer from cached SW state only; physical links query
 * the CGX block over the mailbox and refresh the cache from the
 * response. NOTE(review): error-path lines (mbox failure handling and
 * the successful `return mode`) are outside this extracted view — the
 * trailing ROC_NIX_FC_NONE appears to be a fallback return.
 */
219 roc_nix_fc_mode_get(struct roc_nix *roc_nix)
221 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
222 struct mbox *mbox = get_mbox(roc_nix);
223 struct cgx_pause_frm_cfg *req, *rsp;
224 enum roc_nix_fc_mode mode;
227 /* Flow control on LBK link is always available */
228 if (roc_nix_is_lbk(roc_nix)) {
229 if (nix->tx_pause && nix->rx_pause)
230 return ROC_NIX_FC_FULL;
231 else if (nix->rx_pause)
232 return ROC_NIX_FC_RX;
233 else if (nix->tx_pause)
234 return ROC_NIX_FC_TX;
236 return ROC_NIX_FC_NONE;
/* Physical link: ask CGX for the pause-frame configuration */
239 req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
244 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Map rx/tx pause bits from the response to the generic mode enum */
248 if (rsp->rx_pause && rsp->tx_pause)
249 mode = ROC_NIX_FC_FULL;
250 else if (rsp->rx_pause)
251 mode = ROC_NIX_FC_RX;
252 else if (rsp->tx_pause)
253 mode = ROC_NIX_FC_TX;
255 mode = ROC_NIX_FC_NONE;
/* Refresh the SW cache so LBK-style fast answers stay coherent */
257 nix->rx_pause = rsp->rx_pause;
258 nix->tx_pause = rsp->tx_pause;
262 return ROC_NIX_FC_NONE;
/* Set the pause-frame mode. Decomposes @mode into rx/tx pause flags;
 * LBK links only update the SW cache (no HW programming needed),
 * physical links program CGX over the mailbox and then cache the new
 * state on success.
 */
266 roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
268 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
269 struct mbox *mbox = get_mbox(roc_nix);
270 struct cgx_pause_frm_cfg *req;
271 uint8_t tx_pause, rx_pause;
/* FULL implies both directions; RX/TX select one each */
274 rx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_RX);
275 tx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_TX);
277 /* Nothing much to do for LBK links */
278 if (roc_nix_is_lbk(roc_nix)) {
279 nix->rx_pause = rx_pause;
280 nix->tx_pause = tx_pause;
/* Physical link: push the new pause config to CGX */
284 req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
288 req->rx_pause = rx_pause;
289 req->tx_pause = tx_pause;
291 rc = mbox_process(mbox);
/* Cache only after the mailbox call — reached on the success path */
295 nix->rx_pause = rx_pause;
296 nix->tx_pause = tx_pause;
303 rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
306 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
307 struct npa_lf *lf = idev_npa_obj_get();
308 struct npa_aq_enq_req *req;
309 struct npa_aq_enq_rsp *rsp;
314 if (roc_nix_is_sdp(roc_nix))
321 req = mbox_alloc_msg_npa_aq_enq(mbox);
325 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
326 req->ctype = NPA_AQ_CTYPE_AURA;
327 req->op = NPA_AQ_INSTOP_READ;
329 rc = mbox_process_msg(mbox, (void *)&rsp);
333 limit = rsp->aura.limit;
334 /* BP is already enabled. */
335 if (rsp->aura.bp_ena) {
339 nix1 = !!(rsp->aura.bp_ena & 0x2);
341 bpid = rsp->aura.nix1_bpid;
343 bpid = rsp->aura.nix0_bpid;
345 /* If BP ids don't match disable BP. */
346 if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[0])) &&
348 req = mbox_alloc_msg_npa_aq_enq(mbox);
352 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
353 req->ctype = NPA_AQ_CTYPE_AURA;
354 req->op = NPA_AQ_INSTOP_WRITE;
356 req->aura.bp_ena = 0;
357 req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
364 /* BP was previously enabled but now disabled skip. */
368 req = mbox_alloc_msg_npa_aq_enq(mbox);
372 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
373 req->ctype = NPA_AQ_CTYPE_AURA;
374 req->op = NPA_AQ_INSTOP_WRITE;
378 req->aura.nix1_bpid = nix->bpid[0];
379 req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
381 req->aura.nix0_bpid = nix->bpid[0];
382 req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
384 req->aura.bp = NIX_RQ_AURA_THRESH(
385 limit > 128 ? 256 : limit); /* 95% of size*/
386 req->aura_mask.bp = ~(req->aura_mask.bp);
389 req->aura.bp_ena = (!!ena << nix->is_nix1);
390 req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);