1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Convert a nix_q_size enum value to the queue depth in entries.
 * Depths start at 16 and quadruple per enum step: 16, 64, 256, ...
 * NOTE(review): the return type line and braces are elided in this view.
 */
9 nix_qsize_to_val(enum nix_q_size qsize)
11 return (16UL << (qsize * 2));
/* Clamp an arbitrary descriptor count up to the nearest supported
 * nix_q_size enum value that can hold at least @val entries.
 * Caps at the largest supported size if @val exceeds every bucket.
 * NOTE(review): the loop's `break` and the final `return` are elided
 * in this view; behavior inferred from the visible scan-and-clamp shape.
 */
14 static inline enum nix_q_size
15 nix_qsize_clampup(uint32_t val)
17 int i = nix_q_size_16;
19 for (; i < nix_q_size_max; i++)
20 if (val <= nix_qsize_to_val(i))
23 if (i >= nix_q_size_max)
24 i = nix_q_size_max - 1;
/* Enable or disable an RQ via an AQ WRITE on its context.
 * Only the `ena` bit is updated; the mask complement selects that one
 * field (assumes the mbox message is zero-initialized on alloc —
 * TODO confirm mbox_alloc_msg_* semantics). The CN9K and CN10K paths
 * differ only in the mailbox request layout used.
 */
30 roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
32 struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
33 struct mbox *mbox = (&nix->dev)->mbox;
36 /* Pkts will be dropped silently if RQ is disabled */
37 if (roc_model_is_cn9k()) {
38 struct nix_aq_enq_req *aq;
40 aq = mbox_alloc_msg_nix_aq_enq(mbox);
42 aq->ctype = NIX_AQ_CTYPE_RQ;
43 aq->op = NIX_AQ_INSTOP_WRITE;
/* Flip mask bit so only rq.ena is written by the AQ WRITE. */
46 aq->rq_mask.ena = ~(aq->rq_mask.ena);
48 struct nix_cn10k_aq_enq_req *aq;
50 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
52 aq->ctype = NIX_AQ_CTYPE_RQ;
53 aq->op = NIX_AQ_INSTOP_WRITE;
56 aq->rq_mask.ena = ~(aq->rq_mask.ena);
59 rc = mbox_process(mbox);
/* CN10K only: flush any pending vector WQEs for this queue after the
 * state change — presumably needed so stale VWQEs are not delivered;
 * TODO confirm against hardware errata/spec.
 */
61 if (roc_model_is_cn10k())
62 plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
/* Build (but do not send) a CN9K RQ context AQ request.
 * @cfg selects WRITE (modify existing context) vs INIT (first-time setup);
 * @ena is the desired enable state. The caller issues mbox_process().
 * NOTE(review): the if/else lines selecting the SSO vs CQ delivery branch
 * and several assignments are elided in this view.
 */
67 rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
69 struct mbox *mbox = (&nix->dev)->mbox;
70 struct nix_aq_enq_req *aq;
72 aq = mbox_alloc_msg_nix_aq_enq(mbox);
74 aq->ctype = NIX_AQ_CTYPE_RQ;
75 aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
/* SSO (event) delivery path: tag type and group come from the RQ config. */
80 aq->rq.sso_tt = rq->tt;
81 aq->rq.sso_grp = rq->hwgrp;
83 aq->rq.wqe_skip = rq->wqe_skip;
84 aq->rq.wqe_caching = 1;
/* Tag layout: top 8 bits of tag_mask feed good/bad utag, low 24 the ltag. */
86 aq->rq.good_utag = rq->tag_mask >> 24;
87 aq->rq.bad_utag = rq->tag_mask >> 24;
88 aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
/* Same tag split on the CQ (non-SSO) delivery branch. */
92 aq->rq.good_utag = rq->tag_mask >> 24;
93 aq->rq.bad_utag = rq->tag_mask >> 24;
94 aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
99 aq->rq.ipsech_ena = 1;
102 aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
104 /* Sizes must be aligned to 8 bytes */
105 if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
108 /* Expressed in number of dwords */
109 aq->rq.first_skip = rq->first_skip / 8;
110 aq->rq.later_skip = rq->later_skip / 8;
111 aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
112 aq->rq.lpb_sizem1 = rq->lpb_size / 8;
113 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
115 aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
116 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
117 aq->rq.rq_int_ena = 0;
118 /* Many to one reduction */
119 aq->rq.qint_idx = rq->qid % nix->qints;
120 aq->rq.xqe_drop_ena = 1;
/* WRITE op: set a mask bit for every field written above so the AQ
 * updates only those fields. Complementing works because the message
 * is presumably zero-initialized on alloc — TODO confirm.
 */
125 aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
126 aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
127 aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
128 aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
129 aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
130 aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
131 aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
132 aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
133 aq->rq_mask.ltag = ~aq->rq_mask.ltag;
/* Mask entries for the CQ delivery branch. */
136 aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
137 aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
138 aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
139 aq->rq_mask.ltag = ~aq->rq_mask.ltag;
140 aq->rq_mask.cq = ~aq->rq_mask.cq;
144 aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
146 aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
147 aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
148 aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
149 aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
150 aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
151 aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
152 aq->rq_mask.ena = ~aq->rq_mask.ena;
153 aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
154 aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
155 aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
156 aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
157 aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
/* Build (but do not send) a CN10K RQ context AQ request.
 * CN10K superset of rq_cn9k_cfg(): adds vector WQE (VWQE) coalescing
 * parameters and a split small/large packet buffer (SPB/LPB) pool.
 * @cfg selects WRITE vs INIT; the caller issues mbox_process().
 * NOTE(review): branch conditions (SSO vs CQ path, spb/vwqe enables)
 * are elided in this view.
 */
164 rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
166 struct mbox *mbox = (&nix->dev)->mbox;
167 struct nix_cn10k_aq_enq_req *aq;
169 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
171 aq->ctype = NIX_AQ_CTYPE_RQ;
172 aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
/* SSO (event) delivery path. */
177 aq->rq.sso_tt = rq->tt;
178 aq->rq.sso_grp = rq->hwgrp;
180 aq->rq.wqe_skip = rq->wqe_skip;
181 aq->rq.wqe_caching = 1;
/* Tag layout: top 8 bits of tag_mask feed good/bad utag, low 24 the ltag. */
183 aq->rq.good_utag = rq->tag_mask >> 24;
184 aq->rq.bad_utag = rq->tag_mask >> 24;
185 aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
/* Vector WQE coalescing parameters (CN10K only). */
188 aq->rq.vwqe_ena = true;
189 aq->rq.vwqe_skip = rq->vwqe_first_skip;
190 /* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
191 aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
192 aq->rq.vtime_wait = rq->vwqe_wait_tmo;
193 aq->rq.wqe_aura = rq->vwqe_aura_handle;
/* CQ (non-SSO) delivery branch — same tag split. */
198 aq->rq.good_utag = rq->tag_mask >> 24;
199 aq->rq.bad_utag = rq->tag_mask >> 24;
200 aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
205 aq->rq.ipsech_ena = 1;
207 aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
209 /* Sizes must be aligned to 8 bytes */
210 if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
213 /* Expressed in number of dwords */
214 aq->rq.first_skip = rq->first_skip / 8;
215 aq->rq.later_skip = rq->later_skip / 8;
216 aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
217 aq->rq.lpb_sizem1 = rq->lpb_size / 8;
218 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
/* Small packet buffer (SPB) pool configuration. */
226 roc_npa_aura_handle_to_aura(rq->spb_aura_handle);
228 if (rq->spb_size & 0x7 ||
229 rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
232 spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
233 spb_sizem1 -= 1; /* Expressed in size minus one */
/* size-minus-one is split: low 6 bits and high 3 bits live in
 * separate context fields.
 */
234 aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
235 aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
240 aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
241 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
242 aq->rq.rq_int_ena = 0;
243 /* Many to one reduction */
244 aq->rq.qint_idx = rq->qid % nix->qints;
245 aq->rq.xqe_drop_ena = 1;
/* WRITE op: enable the mask bit for every field written above so only
 * those context fields are updated. Assumes zero-initialized message —
 * TODO confirm mbox_alloc_msg_* zeroes the request.
 */
250 aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
251 aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
252 aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
253 aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
254 aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
255 aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
256 aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
257 aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
258 aq->rq_mask.ltag = ~aq->rq_mask.ltag;
260 aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
261 aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
262 aq->rq_mask.max_vsize_exp =
263 ~aq->rq_mask.max_vsize_exp;
264 aq->rq_mask.vtime_wait =
265 ~aq->rq_mask.vtime_wait;
266 aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
/* Mask entries for the CQ delivery branch. */
270 aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
271 aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
272 aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
273 aq->rq_mask.ltag = ~aq->rq_mask.ltag;
274 aq->rq_mask.cq = ~aq->rq_mask.cq;
278 aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
281 aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
282 aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
283 aq->rq_mask.spb_high_sizem1 =
284 ~aq->rq_mask.spb_high_sizem1;
287 aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
288 aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
289 aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
290 aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
291 aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
292 aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
293 aq->rq_mask.ena = ~aq->rq_mask.ena;
294 aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
295 aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
296 aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
297 aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
298 aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
/* Public API: first-time initialization of an RQ context.
 * Validates arguments and qid range, dispatches to the model-specific
 * config builder with cfg=false (AQ INIT op), then fires the mailbox.
 * Returns 0 on success or a negative NIX_ERR_* / mbox error code.
 * NOTE(review): the rc checks and else branch are elided in this view.
 */
305 roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
307 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
308 struct mbox *mbox = (&nix->dev)->mbox;
309 bool is_cn9k = roc_model_is_cn9k();
312 if (roc_nix == NULL || rq == NULL)
313 return NIX_ERR_PARAM;
315 if (rq->qid >= nix->nb_rx_queues)
316 return NIX_ERR_QUEUE_INVALID_RANGE;
/* Back-pointer used later by roc_nix_rq_ena_dis()/fini(). */
318 rq->roc_nix = roc_nix;
321 rc = rq_cn9k_cfg(nix, rq, false, ena);
323 rc = rq_cfg(nix, rq, false, ena);
328 return mbox_process(mbox);
/* Public API: modify an already-initialized RQ context.
 * Same flow as roc_nix_rq_init() but cfg=true selects the AQ WRITE op,
 * so only the masked fields of the existing context are updated.
 * Returns 0 on success or a negative NIX_ERR_* / mbox error code.
 */
332 roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
334 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
335 struct mbox *mbox = (&nix->dev)->mbox;
336 bool is_cn9k = roc_model_is_cn9k();
339 if (roc_nix == NULL || rq == NULL)
340 return NIX_ERR_PARAM;
342 if (rq->qid >= nix->nb_rx_queues)
343 return NIX_ERR_QUEUE_INVALID_RANGE;
345 rq->roc_nix = roc_nix;
348 rc = rq_cn9k_cfg(nix, rq, true, ena);
350 rc = rq_cfg(nix, rq, true, ena);
355 return mbox_process(mbox);
/* Public API: tear down an RQ. No context destruction is needed;
 * disabling the queue is enough (hardware silently drops packets
 * for a disabled RQ, per the note in roc_nix_rq_ena_dis()).
 */
359 roc_nix_rq_fini(struct roc_nix_rq *rq)
361 /* Disabling RQ is sufficient */
362 return roc_nix_rq_ena_dis(rq, false);
/* Public API: allocate and initialize a completion queue.
 * Rounds the requested depth up to a supported power-of-4 bucket,
 * allocates the descriptor ring, fills the software-visible CQ fields
 * (doorbell/status register addresses, qmask, wdata), then issues an
 * AQ INIT for the hardware context. Frees the ring on failure.
 * NOTE(review): null checks, rc handling, and the fail label are
 * elided in this view.
 */
366 roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
368 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
369 struct mbox *mbox = (&nix->dev)->mbox;
370 volatile struct nix_cq_ctx_s *cq_ctx;
371 enum nix_q_size qsize;
376 return NIX_ERR_PARAM;
378 if (cq->qid >= nix->nb_rx_queues)
379 return NIX_ERR_QUEUE_INVALID_RANGE;
/* Round the requested depth up to a supported size; nb_desc then
 * becomes that power-of-4 value, making qmask a valid ring mask.
 */
381 qsize = nix_qsize_clampup(cq->nb_desc);
382 cq->nb_desc = nix_qsize_to_val(qsize);
383 cq->qmask = cq->nb_desc - 1;
384 cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
385 cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
/* qid in the upper 32 bits, as expected by the CQ OP registers. */
386 cq->wdata = (uint64_t)cq->qid << 32;
387 cq->roc_nix = roc_nix;
388 cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
391 desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
392 cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
393 if (cq->desc_base == NULL) {
/* Model-specific mailbox request; the context layout written below
 * is shared between both request types.
 */
398 if (roc_model_is_cn9k()) {
399 struct nix_aq_enq_req *aq;
401 aq = mbox_alloc_msg_nix_aq_enq(mbox);
403 aq->ctype = NIX_AQ_CTYPE_CQ;
404 aq->op = NIX_AQ_INSTOP_INIT;
407 struct nix_cn10k_aq_enq_req *aq;
409 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
411 aq->ctype = NIX_AQ_CTYPE_CQ;
412 aq->op = NIX_AQ_INSTOP_INIT;
418 cq_ctx->qsize = qsize;
419 cq_ctx->base = (uint64_t)cq->desc_base;
420 cq_ctx->avg_level = 0xff;
421 cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
422 cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
424 /* Many to one reduction */
425 cq_ctx->qint_idx = cq->qid % nix->qints;
426 /* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
427 cq_ctx->cint_idx = cq->qid;
429 cq_ctx->drop = cq->drop_thresh;
430 cq_ctx->drop_ena = 1;
432 /* TX pause frames enable flow ctrl on RX side */
434 /* Single BPID is allocated for all rx channels for now */
435 cq_ctx->bpid = nix->bpid[0];
/* Backpressure asserts at the same level packets start dropping. */
436 cq_ctx->bp = cq_ctx->drop;
440 rc = mbox_process(mbox);
/* Failure path: release the descriptor ring allocated above. */
447 plt_free(cq->desc_base);
/* Public API: tear down a CQ. Disables the queue and its backpressure
 * via an AQ WRITE (masking only ena and bp_ena), then frees the
 * descriptor ring. NOTE(review): null check, context-value assignments,
 * rc handling, and the final return are elided in this view.
 */
453 roc_nix_cq_fini(struct roc_nix_cq *cq)
460 return NIX_ERR_PARAM;
462 nix = roc_nix_to_nix_priv(cq->roc_nix);
463 mbox = (&nix->dev)->mbox;
466 if (roc_model_is_cn9k()) {
467 struct nix_aq_enq_req *aq;
469 aq = mbox_alloc_msg_nix_aq_enq(mbox);
471 aq->ctype = NIX_AQ_CTYPE_CQ;
472 aq->op = NIX_AQ_INSTOP_WRITE;
/* Update only the enable and backpressure-enable bits. */
475 aq->cq_mask.ena = ~aq->cq_mask.ena;
476 aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
478 struct nix_cn10k_aq_enq_req *aq;
480 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
482 aq->ctype = NIX_AQ_CTYPE_CQ;
483 aq->op = NIX_AQ_INSTOP_WRITE;
486 aq->cq_mask.ena = ~aq->cq_mask.ena;
487 aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
490 rc = mbox_process(mbox);
/* Release the descriptor ring allocated in roc_nix_cq_init(). */
494 plt_free(cq->desc_base);