1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
9 nix_qsize_to_val(enum nix_q_size qsize)
11 return (16UL << (qsize * 2));
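/*
 * 16UL << (qsize * 2) gives 16, 64, 256, ... as qsize increases, so each
 * step of the nix_q_size enum quadruples the queue depth.
 */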
14 static inline enum nix_q_size
15 nix_qsize_clampup(uint32_t val)
17 int i = nix_q_size_16;
19 for (; i < nix_q_size_max; i++)
20 if (val <= nix_qsize_to_val(i))
23 if (i >= nix_q_size_max)
24 i = nix_q_size_max - 1;
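/* For example, a request for 1000 descriptors rounds up to the 1024-entry
 * size, while anything larger than the biggest supported size is clamped
 * down to it by the check above.
 */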
30 roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
32 struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
33 struct mbox *mbox = (&nix->dev)->mbox;
36 /* Packets are dropped silently if the RQ is disabled */
37 if (roc_model_is_cn9k()) {
38 struct nix_aq_enq_req *aq;
40 aq = mbox_alloc_msg_nix_aq_enq(mbox);
42 aq->ctype = NIX_AQ_CTYPE_RQ;
43 aq->op = NIX_AQ_INSTOP_WRITE;
46 aq->rq_mask.ena = ~(aq->rq_mask.ena);
48 struct nix_cn10k_aq_enq_req *aq;
50 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
52 aq->ctype = NIX_AQ_CTYPE_RQ;
53 aq->op = NIX_AQ_INSTOP_WRITE;
56 aq->rq_mask.ena = ~(aq->rq_mask.ena);
59 rc = mbox_process(mbox);
61 if (roc_model_is_cn10k())
62 plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
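/* On CN10K this is presumably needed so that any partially filled vector
 * WQE for this RQ is flushed out when the queue state changes; CN9K has no
 * VWQE support, so the extra op is skipped there.
 */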
67 rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
69 struct mbox *mbox = (&nix->dev)->mbox;
70 struct nix_aq_enq_req *aq;
72 aq = mbox_alloc_msg_nix_aq_enq(mbox);
74 aq->ctype = NIX_AQ_CTYPE_RQ;
75 aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
80 aq->rq.sso_tt = rq->tt;
81 aq->rq.sso_grp = rq->hwgrp;
83 aq->rq.wqe_skip = rq->wqe_skip;
84 aq->rq.wqe_caching = 1;
86 aq->rq.good_utag = rq->tag_mask >> 24;
87 aq->rq.bad_utag = rq->tag_mask >> 24;
88 aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
92 aq->rq.good_utag = rq->tag_mask >> 24;
93 aq->rq.bad_utag = rq->tag_mask >> 24;
94 aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
99 aq->rq.ipsech_ena = 1;
102 aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
104 /* Sizes must be aligned to 8 bytes */
105 if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
108 /* Expressed in number of dwords */
109 aq->rq.first_skip = rq->first_skip / 8;
110 aq->rq.later_skip = rq->later_skip / 8;
111 aq->rq.flow_tagw = rq->flow_tag_width; /* 32 bits */
112 aq->rq.lpb_sizem1 = rq->lpb_size / 8;
113 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
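/* For example, with first_skip = 128 and lpb_size = 2048 (both in bytes and
 * 8-byte aligned), the context fields become first_skip = 16 dwords and
 * lpb_sizem1 = 2048 / 8 - 1 = 255.
 */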
115 aq->rq.pb_caching = 0x2; /* First cache-aligned block goes to the LLC */
116 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
117 aq->rq.rq_int_ena = 0;
118 /* Many to one reduction */
119 aq->rq.qint_idx = rq->qid % nix->qints;
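/* With more RQs than queue interrupts, qid % nix->qints folds several
 * queues onto each QINT vector; e.g. with 4 QINTs, RQs 0, 4, 8, ... all
 * raise QINT 0.
 */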
120 aq->rq.xqe_drop_ena = 1;
122 /* If RED is enabled, fill in the pass/drop levels for all cases */
123 if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
124 aq->rq.spb_aura_pass = rq->spb_red_pass;
125 aq->rq.lpb_aura_pass = rq->red_pass;
127 aq->rq.spb_aura_drop = rq->spb_red_drop;
128 aq->rq.lpb_aura_drop = rq->red_drop;
134 aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
135 aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
136 aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
137 aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
138 aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
139 aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
140 aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
141 aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
142 aq->rq_mask.ltag = ~aq->rq_mask.ltag;
145 aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
146 aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
147 aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
148 aq->rq_mask.ltag = ~aq->rq_mask.ltag;
149 aq->rq_mask.cq = ~aq->rq_mask.cq;
153 aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
155 aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
156 aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
157 aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
158 aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
159 aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
160 aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
161 aq->rq_mask.ena = ~aq->rq_mask.ena;
162 aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
163 aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
164 aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
165 aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
166 aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
168 if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
169 aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
170 aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
172 aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
173 aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
181 rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
183 struct mbox *mbox = (&nix->dev)->mbox;
184 struct nix_cn10k_aq_enq_req *aq;
186 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
188 aq->ctype = NIX_AQ_CTYPE_RQ;
189 aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
194 aq->rq.sso_tt = rq->tt;
195 aq->rq.sso_grp = rq->hwgrp;
197 aq->rq.wqe_skip = rq->wqe_skip;
198 aq->rq.wqe_caching = 1;
200 aq->rq.good_utag = rq->tag_mask >> 24;
201 aq->rq.bad_utag = rq->tag_mask >> 24;
202 aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
205 aq->rq.vwqe_ena = true;
206 aq->rq.vwqe_skip = rq->vwqe_first_skip;
207 /* Maximum vector size is 2^(MAX_VSIZE_EXP + 2) */
208 aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
209 aq->rq.vtime_wait = rq->vwqe_wait_tmo;
210 aq->rq.wqe_aura = rq->vwqe_aura_handle;
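/* For example, requesting vwqe_max_sz_exp = 6 (vectors of up to 2^6 = 64
 * packets) programs max_vsize_exp = 4, since hardware interprets the limit
 * as 2^(max_vsize_exp + 2).
 */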
215 aq->rq.good_utag = rq->tag_mask >> 24;
216 aq->rq.bad_utag = rq->tag_mask >> 24;
217 aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
222 aq->rq.ipsech_ena = 1;
224 aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
226 /* Sizes must be aligned to 8 bytes */
227 if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
230 /* Expressed in number of dwords */
231 aq->rq.first_skip = rq->first_skip / 8;
232 aq->rq.later_skip = rq->later_skip / 8;
233 aq->rq.flow_tagw = rq->flow_tag_width; /* 32 bits */
234 aq->rq.lpb_sizem1 = rq->lpb_size / 8;
235 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
243 roc_npa_aura_handle_to_aura(rq->spb_aura_handle);
245 if (rq->spb_size & 0x7 ||
246 rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
249 spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
250 spb_sizem1 -= 1; /* Expressed in size minus one */
251 aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
252 aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
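/* The 9-bit size-minus-one value is split across two context fields: e.g.
 * spb_size = 4096 bytes gives spb_sizem1 = 4096 / 8 - 1 = 511, stored as
 * spb_sizem1 = 63 (low 6 bits) and spb_high_sizem1 = 7 (upper 3 bits),
 * assuming 4096 is within NIX_RQ_CN10K_SPB_MAX_SIZE.
 */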
257 aq->rq.pb_caching = 0x2; /* First cache-aligned block goes to the LLC */
258 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
259 aq->rq.rq_int_ena = 0;
260 /* Many to one reduction */
261 aq->rq.qint_idx = rq->qid % nix->qints;
262 aq->rq.xqe_drop_ena = 1;
264 /* If RED is enabled, fill in the pass/drop levels for all cases */
265 if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
266 aq->rq.spb_pool_pass = rq->red_pass;
267 aq->rq.spb_aura_pass = rq->red_pass;
268 aq->rq.lpb_pool_pass = rq->red_pass;
269 aq->rq.lpb_aura_pass = rq->red_pass;
270 aq->rq.wqe_pool_pass = rq->red_pass;
271 aq->rq.xqe_pass = rq->red_pass;
273 aq->rq.spb_pool_drop = rq->red_drop;
274 aq->rq.spb_aura_drop = rq->red_drop;
275 aq->rq.lpb_pool_drop = rq->red_drop;
276 aq->rq.lpb_aura_drop = rq->red_drop;
277 aq->rq.wqe_pool_drop = rq->red_drop;
278 aq->rq.xqe_drop = rq->red_drop;
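/* Unlike CN9K, which only programs the SPB/LPB aura levels, CN10K applies
 * the same pass/drop thresholds to the pools, the auras, the WQE pool and
 * the XQE drop level.
 */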
284 aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
285 aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
286 aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
287 aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
288 aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
289 aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
290 aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
291 aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
292 aq->rq_mask.ltag = ~aq->rq_mask.ltag;
294 aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
295 aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
296 aq->rq_mask.max_vsize_exp =
297 ~aq->rq_mask.max_vsize_exp;
298 aq->rq_mask.vtime_wait =
299 ~aq->rq_mask.vtime_wait;
300 aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
304 aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
305 aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
306 aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
307 aq->rq_mask.ltag = ~aq->rq_mask.ltag;
308 aq->rq_mask.cq = ~aq->rq_mask.cq;
312 aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
315 aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
316 aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
317 aq->rq_mask.spb_high_sizem1 =
318 ~aq->rq_mask.spb_high_sizem1;
321 aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
322 aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
323 aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
324 aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
325 aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
326 aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
327 aq->rq_mask.ena = ~aq->rq_mask.ena;
328 aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
329 aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
330 aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
331 aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
332 aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
334 if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
335 aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
336 aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
337 aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
338 aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
339 aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
340 aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;
342 aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
343 aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
344 aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
345 aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
346 aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
347 aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
355 roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
357 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
358 struct mbox *mbox = (&nix->dev)->mbox;
359 bool is_cn9k = roc_model_is_cn9k();
362 if (roc_nix == NULL || rq == NULL)
363 return NIX_ERR_PARAM;
365 if (rq->qid >= nix->nb_rx_queues)
366 return NIX_ERR_QUEUE_INVALID_RANGE;
368 rq->roc_nix = roc_nix;
371 rc = rq_cn9k_cfg(nix, rq, false, ena);
373 rc = rq_cfg(nix, rq, false, ena);
378 return mbox_process(mbox);
382 roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
384 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
385 struct mbox *mbox = (&nix->dev)->mbox;
386 bool is_cn9k = roc_model_is_cn9k();
389 if (roc_nix == NULL || rq == NULL)
390 return NIX_ERR_PARAM;
392 if (rq->qid >= nix->nb_rx_queues)
393 return NIX_ERR_QUEUE_INVALID_RANGE;
395 rq->roc_nix = roc_nix;
398 rc = rq_cn9k_cfg(nix, rq, true, ena);
400 rc = rq_cfg(nix, rq, true, ena);
405 return mbox_process(mbox);
409 roc_nix_rq_fini(struct roc_nix_rq *rq)
411 /* Disabling RQ is sufficient */
412 return roc_nix_rq_ena_dis(rq, false);
416 roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
418 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
419 struct mbox *mbox = (&nix->dev)->mbox;
420 volatile struct nix_cq_ctx_s *cq_ctx;
421 enum nix_q_size qsize;
426 return NIX_ERR_PARAM;
428 if (cq->qid >= nix->nb_rx_queues)
429 return NIX_ERR_QUEUE_INVALID_RANGE;
431 qsize = nix_qsize_clampup(cq->nb_desc);
432 cq->nb_desc = nix_qsize_to_val(qsize);
433 cq->qmask = cq->nb_desc - 1;
434 cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
435 cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
436 cq->wdata = (uint64_t)cq->qid << 32;
437 cq->roc_nix = roc_nix;
438 cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
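/* nb_desc is first rounded up to a supported queue size, so the ring depth
 * is always a power of two and qmask can be used for index wrap-around;
 * e.g. a request for 1000 descriptors becomes 1024 with qmask = 0x3FF.
 */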
441 desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
442 cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
443 if (cq->desc_base == NULL) {
448 if (roc_model_is_cn9k()) {
449 struct nix_aq_enq_req *aq;
451 aq = mbox_alloc_msg_nix_aq_enq(mbox);
453 aq->ctype = NIX_AQ_CTYPE_CQ;
454 aq->op = NIX_AQ_INSTOP_INIT;
457 struct nix_cn10k_aq_enq_req *aq;
459 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
461 aq->ctype = NIX_AQ_CTYPE_CQ;
462 aq->op = NIX_AQ_INSTOP_INIT;
468 cq_ctx->qsize = qsize;
469 cq_ctx->base = (uint64_t)cq->desc_base;
470 cq_ctx->avg_level = 0xff;
471 cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
472 cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
474 /* Many to one reduction */
475 cq_ctx->qint_idx = cq->qid % nix->qints;
476 /* Map CQ0 [RQ0] to CINT0 and so on, up to a maximum of 64 IRQs */
477 cq_ctx->cint_idx = cq->qid;
479 cq_ctx->drop = cq->drop_thresh;
480 cq_ctx->drop_ena = 1;
482 /* TX pause frames enable flow control on the RX side */
484 /* A single BPID is allocated for all RX channels for now */
485 cq_ctx->bpid = nix->bpid[0];
486 cq_ctx->bp = cq_ctx->drop;
490 rc = mbox_process(mbox);
497 plt_free(cq->desc_base);
503 roc_nix_cq_fini(struct roc_nix_cq *cq)
510 return NIX_ERR_PARAM;
512 nix = roc_nix_to_nix_priv(cq->roc_nix);
513 mbox = (&nix->dev)->mbox;
516 if (roc_model_is_cn9k()) {
517 struct nix_aq_enq_req *aq;
519 aq = mbox_alloc_msg_nix_aq_enq(mbox);
521 aq->ctype = NIX_AQ_CTYPE_CQ;
522 aq->op = NIX_AQ_INSTOP_WRITE;
525 aq->cq_mask.ena = ~aq->cq_mask.ena;
526 aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
528 struct nix_cn10k_aq_enq_req *aq;
530 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
532 aq->ctype = NIX_AQ_CTYPE_CQ;
533 aq->op = NIX_AQ_INSTOP_WRITE;
536 aq->cq_mask.ena = ~aq->cq_mask.ena;
537 aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
540 rc = mbox_process(mbox);
544 plt_free(cq->desc_base);
549 sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
551 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
552 uint16_t sqes_per_sqb, count, nb_sqb_bufs;
553 struct npa_pool_s pool;
554 struct npa_aura_s aura;
559 blk_sz = nix->sqb_size;
560 if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
561 sqes_per_sqb = (blk_sz / 8) / 16;
563 sqes_per_sqb = (blk_sz / 8) / 8;
565 sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
566 nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
567 nb_sqb_bufs += NIX_SQB_LIST_SPACE;
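/* For example, assuming a 4 KB SQB (blk_sz = 4096) and W16 SQEs,
 * sqes_per_sqb = (4096 / 8) / 16 = 32, so a 1024-descriptor SQ needs
 * 1024 / 32 = 32 SQBs plus the NIX_SQB_LIST_SPACE slack added above.
 */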
568 /* Clamp up the SQB count */
569 nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
570 (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
572 sq->nb_sqb_bufs = nb_sqb_bufs;
573 sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
574 sq->nb_sqb_bufs_adj =
576 (PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
577 sq->nb_sqb_bufs_adj =
578 (sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;
580 /* Explicitly set only nat_align; by default the pool has both
581 * nat_align and buf_offset set to 1, which we don't want for SQBs.
583 memset(&pool, 0, sizeof(struct npa_pool_s));
586 memset(&aura, 0, sizeof(aura));
588 if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
589 aura.fc_stype = 0x0; /* STF */
591 aura.fc_stype = 0x3; /* STSTP */
592 aura.fc_addr = (uint64_t)sq->fc;
593 aura.fc_hyst_bits = 0; /* Store count on all updates */
594 rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
599 sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
600 if (sq->sqe_mem == NULL) {
605 /* Fill the initial buffers */
606 iova = (uint64_t)sq->sqe_mem;
607 for (count = 0; count < NIX_MAX_SQB; count++) {
608 roc_npa_aura_op_free(sq->aura_handle, 0, iova);
611 roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
612 roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
613 sq->aura_sqb_bufs = NIX_MAX_SQB;
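/* The pool is created with NIX_MAX_SQB buffers, but the aura limit set
 * just above caps allocation at sq->nb_sqb_bufs; the remaining buffers
 * stay in the pool as headroom.
 */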
617 roc_npa_pool_destroy(sq->aura_handle);
623 sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
626 struct mbox *mbox = (&nix->dev)->mbox;
627 struct nix_aq_enq_req *aq;
629 aq = mbox_alloc_msg_nix_aq_enq(mbox);
631 aq->ctype = NIX_AQ_CTYPE_SQ;
632 aq->op = NIX_AQ_INSTOP_INIT;
633 aq->sq.max_sqe_size = sq->max_sqe_sz;
635 aq->sq.max_sqe_size = sq->max_sqe_sz;
637 aq->sq.smq_rr_quantum = rr_quantum;
638 aq->sq.default_chan = nix->tx_chan_base;
639 aq->sq.sqe_stype = NIX_STYPE_STF;
641 aq->sq.sso_ena = !!sq->sso_ena;
642 if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
643 aq->sq.sqe_stype = NIX_STYPE_STP;
644 aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
645 aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
646 aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
647 aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
648 aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
650 /* Many to one reduction */
651 aq->sq.qint_idx = sq->qid % nix->qints;
655 sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
657 struct mbox *mbox = (&nix->dev)->mbox;
658 struct nix_aq_enq_rsp *rsp;
659 struct nix_aq_enq_req *aq;
660 uint16_t sqes_per_sqb;
664 aq = mbox_alloc_msg_nix_aq_enq(mbox);
666 aq->ctype = NIX_AQ_CTYPE_SQ;
667 aq->op = NIX_AQ_INSTOP_READ;
668 rc = mbox_process_msg(mbox, (void *)&rsp);
672 /* Check if sq is already cleaned up */
677 aq = mbox_alloc_msg_nix_aq_enq(mbox);
679 aq->ctype = NIX_AQ_CTYPE_SQ;
680 aq->op = NIX_AQ_INSTOP_WRITE;
681 aq->sq_mask.ena = ~aq->sq_mask.ena;
683 rc = mbox_process(mbox);
687 /* Read the SQ context and free its SQBs */
688 aq = mbox_alloc_msg_nix_aq_enq(mbox);
690 aq->ctype = NIX_AQ_CTYPE_SQ;
691 aq->op = NIX_AQ_INSTOP_READ;
692 rc = mbox_process_msg(mbox, (void *)&rsp);
697 plt_err("SQ has pending SQE's");
699 count = aq->sq.sqb_count;
700 sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
701 /* Free the SQBs that are in use */
702 sqb_buf = (void *)rsp->sq.head_sqb;
706 next_sqb = *(void **)((uintptr_t)sqb_buf +
707 (uint32_t)((sqes_per_sqb - 1) *
709 roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
714 /* Free the next-to-use SQB */
715 if (rsp->sq.next_sqb)
716 roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
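/* Each SQB reserves its last SQE slot for a pointer to the next SQB, which
 * is how the walk above follows the chain from head_sqb and returns every
 * in-flight buffer to the aura; the next-to-use SQB reported by the
 * context is freed last.
 */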
721 sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
724 struct mbox *mbox = (&nix->dev)->mbox;
725 struct nix_cn10k_aq_enq_req *aq;
727 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
729 aq->ctype = NIX_AQ_CTYPE_SQ;
730 aq->op = NIX_AQ_INSTOP_INIT;
731 aq->sq.max_sqe_size = sq->max_sqe_sz;
733 aq->sq.max_sqe_size = sq->max_sqe_sz;
735 aq->sq.smq_rr_weight = rr_quantum;
736 aq->sq.default_chan = nix->tx_chan_base;
737 aq->sq.sqe_stype = NIX_STYPE_STF;
739 aq->sq.sso_ena = !!sq->sso_ena;
740 if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
741 aq->sq.sqe_stype = NIX_STYPE_STP;
742 aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
743 aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
744 aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
745 aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
746 aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
748 /* Many to one reduction */
749 aq->sq.qint_idx = sq->qid % nix->qints;
753 sq_fini(struct nix *nix, struct roc_nix_sq *sq)
755 struct mbox *mbox = (&nix->dev)->mbox;
756 struct nix_cn10k_aq_enq_rsp *rsp;
757 struct nix_cn10k_aq_enq_req *aq;
758 uint16_t sqes_per_sqb;
762 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
764 aq->ctype = NIX_AQ_CTYPE_SQ;
765 aq->op = NIX_AQ_INSTOP_READ;
766 rc = mbox_process_msg(mbox, (void *)&rsp);
770 /* Check if sq is already cleaned up */
775 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
777 aq->ctype = NIX_AQ_CTYPE_SQ;
778 aq->op = NIX_AQ_INSTOP_WRITE;
779 aq->sq_mask.ena = ~aq->sq_mask.ena;
781 rc = mbox_process(mbox);
785 /* Read the SQ context and free its SQBs */
786 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
788 aq->ctype = NIX_AQ_CTYPE_SQ;
789 aq->op = NIX_AQ_INSTOP_READ;
790 rc = mbox_process_msg(mbox, (void *)&rsp);
795 plt_err("SQ has pending SQE's");
797 count = aq->sq.sqb_count;
798 sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
799 /* Free the SQBs that are in use */
800 sqb_buf = (void *)rsp->sq.head_sqb;
804 next_sqb = *(void **)((uintptr_t)sqb_buf +
805 (uint32_t)((sqes_per_sqb - 1) *
807 roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
812 /* Free the next-to-use SQB */
813 if (rsp->sq.next_sqb)
814 roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
819 roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
821 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
822 struct mbox *mbox = (&nix->dev)->mbox;
823 uint16_t qid, smq = UINT16_MAX;
824 uint32_t rr_quantum = 0;
828 return NIX_ERR_PARAM;
831 if (qid >= nix->nb_tx_queues)
832 return NIX_ERR_QUEUE_INVALID_RANGE;
834 sq->roc_nix = roc_nix;
836 * Allocate memory for flow control updates from HW.
837 * Allocate one cache line so that all FC_STYPE modes fit.
839 sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
840 if (sq->fc == NULL) {
845 rc = sqb_pool_populate(roc_nix, sq);
849 rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
851 rc = NIX_ERR_TM_LEAF_NODE_GET;
855 /* Init SQ context */
856 if (roc_model_is_cn9k())
857 sq_cn9k_init(nix, sq, rr_quantum, smq);
859 sq_init(nix, sq, rr_quantum, smq);
861 rc = mbox_process(mbox);
866 sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
867 /* Evenly distribute LMT slots across SQs */
868 if (roc_model_is_cn9k()) {
869 /* Multiple cores/SQs can safely use the same LMTLINE on CN9K */
870 sq->lmt_addr = (void *)(nix->lmt_base +
871 ((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
882 roc_nix_sq_fini(struct roc_nix_sq *sq)
886 struct ndc_sync_op *ndc_req;
891 return NIX_ERR_PARAM;
893 nix = roc_nix_to_nix_priv(sq->roc_nix);
894 mbox = (&nix->dev)->mbox;
898 rc = nix_tm_sq_flush_pre(sq);
900 /* Release SQ context */
901 if (roc_model_is_cn9k())
902 rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
904 rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
906 /* Sync NDC-NIX-TX for LF */
907 ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
910 ndc_req->nix_lf_tx_sync = 1;
911 if (mbox_process(mbox))
912 rc |= NIX_ERR_NDC_SYNC;
914 rc |= nix_tm_sq_flush_post(sq);
915 rc |= roc_npa_pool_destroy(sq->aura_handle);
917 plt_free(sq->sqe_mem);
918 nix->sqs[qid] = NULL;