1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Resolve the mailbox handle used to talk to the AF/kernel for this NIX:
 * walk from the public roc_nix handle to the private nix and its embedded
 * dev. NOTE(review): trailing lines are elided here; presumably this
 * returns dev->mbox — confirm against the full file.
 */
8 static inline struct mbox *
9 get_mbox(struct roc_nix *roc_nix)
11 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
12 struct dev *dev = &nix->dev;
/* Report the RX-channel backpressure state into @fc_cfg.
 * Purely a cached-state read: no mailbox traffic — it keys off chan_cnt,
 * which nix_fc_rxchan_bpid_set() populates when BP is enabled.
 */
18 nix_fc_rxchan_bpid_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
20 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
/* Non-zero chan_cnt means backpressure BPIDs were allocated earlier. */
22 if (nix->chan_cnt != 0)
23 fc_cfg->rxchan_cfg.enable = true;
/* NOTE(review): line 25 appears to be the else branch (else on an elided
 * line 24) — confirm against the full file. */
25 fc_cfg->rxchan_cfg.enable = false;
/* Mark the result as rxchan-type config, not CQ-type. */
27 fc_cfg->cq_cfg_valid = false;
/* Enable or disable RX-channel backpressure via AF mailbox.
 * On enable: request BPIDs (one shared BPID for all channels), validate the
 * response, and cache BPID/channel count in the private nix struct.
 * On disable: send the disable request and clear the cached BPIDs.
 * NOTE(review): several lines (braces, NULL checks on mbox_alloc, returns,
 * and the enable/disable branch structure) are elided from this view.
 */
33 nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
35 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
36 struct mbox *mbox = get_mbox(roc_nix);
37 struct nix_bp_cfg_req *req;
38 struct nix_bp_cfg_rsp *rsp;
/* SDP interfaces do not take this backpressure path — presumably bail out
 * early here (body elided). */
41 if (roc_nix_is_sdp(roc_nix))
45 req = mbox_alloc_msg_nix_bp_enable(mbox);
/* 0 => a single BPID is shared across all channels rather than one BPID
 * per channel. */
50 req->bpid_per_chan = 0;
52 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Treat a channel-count mismatch between request and response as failure. */
53 if (rc || (req->chan_cnt != rsp->chan_cnt))
/* Cache the allocated BPID and channel count for later CQ/aura config and
 * for nix_fc_rxchan_bpid_get(). */
56 nix->bpid[0] = rsp->chan_bpid[0];
57 nix->chan_cnt = rsp->chan_cnt;
/* Disable path: release the backpressure config in AF. */
59 req = mbox_alloc_msg_nix_bp_disable(mbox);
65 rc = mbox_process(mbox);
/* Forget all cached BPIDs once backpressure is off. */
69 memset(nix->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
/* Read the CQ backpressure configuration for the queue named in
 * fc_cfg->cq_cfg.rq via a NIX AQ READ, and report drop level / enable
 * state back in @fc_cfg. cn9k and cn10k use different AQ request
 * structures but an identical field layout for this operation.
 * NOTE(review): NULL checks after mbox_alloc and the rc check after
 * mbox_process_msg are elided from this view.
 */
78 nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
80 struct mbox *mbox = get_mbox(roc_nix);
81 struct nix_aq_enq_rsp *rsp;
84 if (roc_model_is_cn9k()) {
85 struct nix_aq_enq_req *aq;
87 aq = mbox_alloc_msg_nix_aq_enq(mbox);
88 aq->qidx = fc_cfg->cq_cfg.rq;
89 aq->ctype = NIX_AQ_CTYPE_CQ;
90 aq->op = NIX_AQ_INSTOP_READ;
/* cn10k (and later) variant of the same READ request. */
92 struct nix_cn10k_aq_enq_req *aq;
94 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
95 aq->qidx = fc_cfg->cq_cfg.rq;
96 aq->ctype = NIX_AQ_CTYPE_CQ;
97 aq->op = NIX_AQ_INSTOP_READ;
100 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Copy out the hardware CQ context fields: bp = drop/backpressure level,
 * bp_ena = backpressure enabled. */
104 fc_cfg->cq_cfg.cq_drop = rsp->cq.bp;
105 fc_cfg->cq_cfg.enable = rsp->cq.bp_ena;
/* Mark the result as CQ-type config. */
106 fc_cfg->cq_cfg_valid = true;
/* Program CQ backpressure for the queue named in fc_cfg->cq_cfg.rq via a
 * NIX AQ WRITE. When enabling, also program the cached BPID (bpid[0]) and
 * the drop level; bp_ena is written unconditionally so this same path
 * serves both enable and disable. cn9k and cn10k differ only in the AQ
 * request structure. The *_mask fields select which context bits the
 * hardware applies; ~0-ing a zero-initialized mask field sets all its bits.
 * NOTE(review): NULL checks after mbox_alloc appear to be elided from
 * this view.
 */
113 nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
115 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
116 struct mbox *mbox = get_mbox(roc_nix);
118 if (roc_model_is_cn9k()) {
119 struct nix_aq_enq_req *aq;
121 aq = mbox_alloc_msg_nix_aq_enq(mbox);
122 aq->qidx = fc_cfg->cq_cfg.rq;
123 aq->ctype = NIX_AQ_CTYPE_CQ;
124 aq->op = NIX_AQ_INSTOP_WRITE;
/* Only meaningful when enabling: target BPID and drop threshold. */
126 if (fc_cfg->cq_cfg.enable) {
127 aq->cq.bpid = nix->bpid[0];
128 aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
129 aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
130 aq->cq_mask.bp = ~(aq->cq_mask.bp);
/* Written for both enable and disable. */
133 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
134 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
/* cn10k variant — identical field programming, different request type. */
136 struct nix_cn10k_aq_enq_req *aq;
138 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
139 aq->qidx = fc_cfg->cq_cfg.rq;
140 aq->ctype = NIX_AQ_CTYPE_CQ;
141 aq->op = NIX_AQ_INSTOP_WRITE;
143 if (fc_cfg->cq_cfg.enable) {
144 aq->cq.bpid = nix->bpid[0];
145 aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
146 aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
147 aq->cq_mask.bp = ~(aq->cq_mask.bp);
150 aq->cq.bp_ena = !!(fc_cfg->cq_cfg.enable);
151 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
154 return mbox_process(mbox);
/* Public entry: read flow-control config. Rejects VF/SDP devices (body of
 * the check is elided — presumably returns a not-supported error), then
 * dispatches on cq_cfg_valid: CQ-level config vs RX-channel BPID state.
 */
158 roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
160 if (roc_nix_is_vf_or_sdp(roc_nix))
163 if (fc_cfg->cq_cfg_valid)
164 return nix_fc_cq_config_get(roc_nix, fc_cfg);
166 return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
/* Public entry: apply flow-control config. Mirrors roc_nix_fc_config_get:
 * VF/SDP devices are rejected (check body elided), then dispatches on
 * cq_cfg_valid to either the CQ-level or RX-channel BPID setter.
 */
170 roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
172 if (roc_nix_is_vf_or_sdp(roc_nix))
175 if (fc_cfg->cq_cfg_valid)
176 return nix_fc_cq_config_set(roc_nix, fc_cfg);
178 return nix_fc_rxchan_bpid_set(roc_nix,
179 fc_cfg->rxchan_cfg.enable);
/* Query the link-level pause-frame configuration from CGX and map the
 * rx/tx pause pair onto a roc_nix_fc_mode value. Also refreshes the
 * cached rx_pause/tx_pause in the private nix struct. Loopback (LBK)
 * devices have no CGX, so they always report ROC_NIX_FC_NONE.
 * NOTE(review): the success-path "return mode;" and the error label that
 * line 217 sits under are elided from this view — confirm in the full file.
 */
183 roc_nix_fc_mode_get(struct roc_nix *roc_nix)
185 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
186 struct mbox *mbox = get_mbox(roc_nix);
187 struct cgx_pause_frm_cfg *req, *rsp;
188 enum roc_nix_fc_mode mode;
191 if (roc_nix_is_lbk(roc_nix))
192 return ROC_NIX_FC_NONE;
194 req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
199 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Map the (rx, tx) pause pair to a mode enum. */
203 if (rsp->rx_pause && rsp->tx_pause)
204 mode = ROC_NIX_FC_FULL;
205 else if (rsp->rx_pause)
206 mode = ROC_NIX_FC_RX;
207 else if (rsp->tx_pause)
208 mode = ROC_NIX_FC_TX;
210 mode = ROC_NIX_FC_NONE;
/* Keep the software cache in sync with what hardware reported. */
212 nix->rx_pause = rsp->rx_pause;
213 nix->tx_pause = rsp->tx_pause;
/* Presumably reached only on mbox failure (label elided). */
217 return ROC_NIX_FC_NONE;
/* Program link-level pause frames in CGX according to @mode, then cache
 * the resulting rx/tx pause state. Not supported on loopback (LBK)
 * devices, which have no CGX.
 * NOTE(review): the NULL check after mbox_alloc, the rc error check after
 * mbox_process, and the final return are elided from this view.
 */
221 roc_nix_fc_mode_set(struct roc_nix *roc_nix, enum roc_nix_fc_mode mode)
223 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
224 struct mbox *mbox = get_mbox(roc_nix);
225 struct cgx_pause_frm_cfg *req;
226 uint8_t tx_pause, rx_pause;
229 if (roc_nix_is_lbk(roc_nix))
230 return NIX_ERR_OP_NOTSUP;
/* Decompose the mode enum into independent rx/tx pause flags. */
232 rx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_RX);
233 tx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_TX);
235 req = mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
239 req->rx_pause = rx_pause;
240 req->tx_pause = tx_pause;
242 rc = mbox_process(mbox);
/* Update the cache only after the mailbox call (error check elided). */
246 nix->rx_pause = rx_pause;
247 nix->tx_pause = tx_pause;
/* Enable/disable NPA aura backpressure for @pool_id against this NIX.
 * Flow (visible portion): read the aura context via NPA AQ; if BP is
 * already enabled, compare the programmed (nix0/nix1) BPID against our
 * cached one and disable the stale mapping on mismatch; otherwise write
 * the aura context with our BPID, a threshold derived from the aura
 * limit, and bp_ena shifted by is_nix1 to select the NIX0/NIX1 enable bit.
 * NOTE(review): the function name "rox_" looks like a typo for "roc_" —
 * not renamed here because external callers use this symbol.
 * NOTE(review): this function is truncated at the end of the visible
 * chunk, and NULL/rc checks plus branch braces are elided throughout.
 */
254 rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
257 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
258 struct npa_lf *lf = idev_npa_obj_get();
259 struct npa_aq_enq_req *req;
260 struct npa_aq_enq_rsp *rsp;
/* SDP interfaces do not take this path (check body elided). */
265 if (roc_nix_is_sdp(roc_nix))
272 req = mbox_alloc_msg_npa_aq_enq(mbox);
276 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
277 req->ctype = NPA_AQ_CTYPE_AURA;
278 req->op = NPA_AQ_INSTOP_READ;
280 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Remember the aura size; used below to derive the BP threshold. */
284 limit = rsp->aura.limit;
285 /* BP is already enabled. */
286 if (rsp->aura.bp_ena) {
/* bp_ena bit 1 selects NIX1; pick the matching programmed BPID. */
290 nix1 = !!(rsp->aura.bp_ena & 0x2);
292 bpid = rsp->aura.nix1_bpid;
294 bpid = rsp->aura.nix0_bpid;
296 /* If BP ids don't match disable BP. */
297 if (((nix1 != nix->is_nix1) || (bpid != nix->bpid[0])) &&
299 req = mbox_alloc_msg_npa_aq_enq(mbox);
303 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
304 req->ctype = NPA_AQ_CTYPE_AURA;
305 req->op = NPA_AQ_INSTOP_WRITE;
307 req->aura.bp_ena = 0;
308 req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
315 /* BP was previously enabled but now disabled skip. */
/* Program our BPID into the nix1 or nix0 slot (branch structure elided). */
319 req = mbox_alloc_msg_npa_aq_enq(mbox);
323 req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
324 req->ctype = NPA_AQ_CTYPE_AURA;
325 req->op = NPA_AQ_INSTOP_WRITE;
329 req->aura.nix1_bpid = nix->bpid[0];
330 req->aura_mask.nix1_bpid = ~(req->aura_mask.nix1_bpid);
332 req->aura.nix0_bpid = nix->bpid[0];
333 req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
/* Threshold at ~95% of the aura size, capped at 256 for larger auras. */
335 req->aura.bp = NIX_RQ_AURA_THRESH(
336 limit > 128 ? 256 : limit); /* 95% of size*/
337 req->aura_mask.bp = ~(req->aura_mask.bp);
/* Enable bit 0 for NIX0 or bit 1 for NIX1, depending on which NIX we are. */
340 req->aura.bp_ena = (!!ena << nix->is_nix1);
341 req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);