/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"
static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
{
	return (16UL << (qsize * 2));
}
static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
{
	int i = nix_q_size_16;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}
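/*
 * Worked example (illustrative editor's note, not from the original source):
 * nix_qsize_to_val() quadruples the depth on every enum step, so with
 * nix_q_size_16 == 0 the mapping is 0 -> 16, 1 -> 64, 2 -> 256, 3 -> 1024
 * descriptors and so on. nix_qsize_clampup(1000) therefore walks up to the
 * first size that fits and returns the enum value for a 1024-entry ring.
 */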
int
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	int rc;

	/* Pkts will be dropped silently if RQ is disabled */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	}

	rc = mbox_process(mbox);

	if (roc_model_is_cn10k())
		plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
	return rc;
}
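/*
 * Usage sketch (illustrative editor's note, not from the original source):
 * a caller would typically quiesce an RQ before reconfiguring it, e.g.
 *
 *	roc_nix_rq_ena_dis(rq, false);
 *	... adjust rq fields, then roc_nix_rq_modify(roc_nix, rq, false) ...
 *	roc_nix_rq_ena_dis(rq, true);
 */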
static int
rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->qid;
	}

	if (rq->ipsech_ena)
		aq->rq.ipsech_ena = 1;

	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % nix->qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_aura_pass = rq->spb_red_pass;
		aq->rq.lpb_aura_pass = rq->red_pass;

		aq->rq.spb_aura_drop = rq->spb_red_drop;
		aq->rq.lpb_aura_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
			aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;

			aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
			aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
		}
	}

	return 0;
}
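/*
 * Illustrative note (editor's addition, not from the original source): the
 * "many to one reduction" above is a plain modulo mapping, e.g. with
 * nix->qints == 32, an RQ with qid 40 reports through queue interrupt index
 * 40 % 32 == 8, so several RQs may share one QINT line.
 */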
static int
rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);

		if (rq->vwqe_ena) {
			aq->rq.vwqe_ena = true;
			aq->rq.vwqe_skip = rq->vwqe_first_skip;
			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
			aq->rq.wqe_aura = rq->vwqe_aura_handle;
		}
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->qid;
	}

	if (rq->ipsech_ena)
		aq->rq.ipsech_ena = 1;

	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;

	if (rq->spb_ena) {
		uint32_t spb_sizem1;

		aq->rq.spb_ena = 1;
		aq->rq.spb_aura =
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
			return -EINVAL;

		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1; /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
	} else {
		aq->rq.spb_ena = 0;
	}

	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % nix->qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->red_pass;
		aq->rq.spb_aura_pass = rq->red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.lpb_aura_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->red_drop;
		aq->rq.spb_aura_drop = rq->red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.lpb_aura_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			if (rq->vwqe_ena) {
				aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
				aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
				aq->rq_mask.max_vsize_exp =
					~aq->rq_mask.max_vsize_exp;
				aq->rq_mask.vtime_wait =
					~aq->rq_mask.vtime_wait;
				aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
			}
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		if (rq->spb_ena) {
			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
			aq->rq_mask.spb_high_sizem1 =
				~aq->rq_mask.spb_high_sizem1;
		}

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
			aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
			aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
		}
	}

	return 0;
}
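/*
 * Illustrative note (editor's addition, not from the original source): per
 * the comment in the VWQE block above, the hardware vector size is
 * 2^(max_vsize_exp + 2). A caller that wants vectors of up to 64 packets
 * would set vwqe_max_sz_exp = 6 (2^6 == 64), and the field is then written
 * as 6 - 2 = 4.
 */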
int
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	rq->roc_nix = roc_nix;
	if (is_cn9k)
		rc = rq_cn9k_cfg(nix, rq, false, ena);
	else
		rc = rq_cfg(nix, rq, false, ena);
	if (rc)
		return rc;

	return mbox_process(mbox);
}
int
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	rq->roc_nix = roc_nix;
	if (is_cn9k)
		rc = rq_cn9k_cfg(nix, rq, true, ena);
	else
		rc = rq_cfg(nix, rq, true, ena);
	if (rc)
		return rc;

	return mbox_process(mbox);
}
int
roc_nix_rq_fini(struct roc_nix_rq *rq)
{
	/* Disabling RQ is sufficient */
	return roc_nix_rq_ena_dis(rq, false);
}
int
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cq_ctx_s *cq_ctx;
	enum nix_q_size qsize;
	size_t desc_sz;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	if (cq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;

	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	}

	cq_ctx->ena = 1;
	cq_ctx->caching = 1;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);

	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	cq_ctx->cint_idx = cq->qid;

	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
	} else {
		cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
		cq_ctx->drop = cq->drop_thresh;
		cq_ctx->drop_ena = 1;
	}

	/* TX pause frames enable flow ctrl on RX side */
	if (nix->tx_pause) {
		/* Single BPID is allocated for all rx channels for now */
		cq_ctx->bpid = nix->bpid[0];
		cq_ctx->bp = cq_ctx->drop;
		cq_ctx->bp_ena = 1;
	}

	rc = mbox_process(mbox);
	if (rc)
		goto free_mem;

	return 0;

free_mem:
	plt_free(cq->desc_base);
fail:
	return rc;
}
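/*
 * Illustrative note (editor's addition, not from the original source): a
 * request for, say, cq->nb_desc = 1000 is clamped up by nix_qsize_clampup()
 * to the next supported ring size (1024 entries), so qmask becomes 1023 and
 * cq->wdata carries the queue id in its upper 32 bits for later CQ
 * operations on this ring.
 */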
int
roc_nix_cq_fini(struct roc_nix_cq *cq)
{
	struct mbox *mbox;
	struct nix *nix;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(cq->roc_nix);
	mbox = (&nix->dev)->mbox;

	/* Disable CQ */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	}

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	plt_free(cq->desc_base);
	return 0;
}
static int
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	uint64_t blk_sz;
	uint64_t iova;
	int rc;

	blk_sz = nix->sqb_size;
	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
		sqes_per_sqb = (blk_sz / 8) / 16;
	else
		sqes_per_sqb = (blk_sz / 8) / 8;

	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
	/* Clamp up the SQB count */
	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));

	sq->nb_sqb_bufs = nb_sqb_bufs;
	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		nb_sqb_bufs -
		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;

	/* Explicitly set nat_align alone as by default pool is with both
	 * nat_align and buf_offset = 1 which we don't want for SQB.
	 */
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
		aura.fc_stype = 0x0; /* STF */
	else
		aura.fc_stype = 0x3; /* STSTP */
	aura.fc_addr = (uint64_t)sq->fc;
	aura.fc_hyst_bits = 0; /* Store count on all updates */
	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
				 &pool);
	if (rc)
		goto fail;

	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
	if (sq->sqe_mem == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto nomem;
	}

	/* Fill the initial buffers */
	iova = (uint64_t)sq->sqe_mem;
	for (count = 0; count < NIX_MAX_SQB; count++) {
		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
		iova += blk_sz;
	}
	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
	sq->aura_sqb_bufs = NIX_MAX_SQB;

	return rc;
nomem:
	roc_npa_pool_destroy(sq->aura_handle);
fail:
	return rc;
}
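/*
 * Illustrative note (editor's addition, not from the original source): with
 * a hypothetical SQB block size of 4096 bytes, a 16-word (128 byte) SQE
 * gives (4096 / 8) / 16 == 32 SQEs per SQB, so a ring of 1024 descriptors
 * needs 1024 / 32 == 32 SQBs plus NIX_SQB_LIST_SPACE before the clamp to
 * [NIX_DEF_SQB, max_sqb_count] is applied.
 */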
static void
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	     uint16_t smq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_quantum = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
}
static int
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Use the READ response; the request struct is not filled by HW */
	if (rsp->sq.smq_pend)
		plt_err("SQ has pending SQE's");

	count = rsp->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQB's that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
						 sq->max_sqe_sz));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	return 0;
}
static void
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	uint16_t smq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq = smq;
	aq->sq.smq_rr_weight = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
}
static int
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_rsp *rsp;
	struct nix_cn10k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Use the READ response; the request struct is not filled by HW */
	if (rsp->sq.smq_pend)
		plt_err("SQ has pending SQE's");

	count = rsp->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQB's that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
						 sq->max_sqe_sz));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
	return 0;
}
int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid, smq = UINT16_MAX;
	uint32_t rr_quantum = 0;
	int rc;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	qid = sq->qid;
	if (qid >= nix->nb_tx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	sq->roc_nix = roc_nix;
	/*
	 * Allocate memory for flow control updates from HW.
	 * Alloc one cache line, so that fits all FC_STYPE modes.
	 */
	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (sq->fc == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	rc = sqb_pool_populate(roc_nix, sq);
	if (rc)
		goto nomem;

	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
	if (rc) {
		rc = NIX_ERR_TM_LEAF_NODE_GET;
		goto nomem;
	}

	/* Init SQ context */
	if (roc_model_is_cn9k())
		sq_cn9k_init(nix, sq, rr_quantum, smq);
	else
		sq_init(nix, sq, rr_quantum, smq);

	rc = mbox_process(mbox);
	if (rc)
		goto nomem;

	nix->sqs[qid] = sq;
	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute LMT slot for each sq */
	if (roc_model_is_cn9k()) {
		/* Multiple cores/SQ's can use same LMTLINE safely in CN9K */
		sq->lmt_addr = (void *)(nix->lmt_base +
					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
	}

	return rc;
nomem:
	plt_free(sq->fc);
fail:
	return rc;
}
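/*
 * Illustrative note (editor's addition, not from the original source): the
 * lmt_addr computation above spaces LMT slots 4 KB apart (the << 12), so a
 * queue with qid 5 lands 5 << 12 == 20480 bytes past nix->lmt_base when
 * RVU_CN9K_LMT_SLOT_MASK does not fold the index.
 */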
int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
	struct nix *nix;
	struct mbox *mbox;
	struct ndc_sync_op *ndc_req;
	uint16_t qid;
	int rc = 0;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(sq->roc_nix);
	mbox = (&nix->dev)->mbox;

	qid = sq->qid;

	rc = nix_tm_sq_flush_pre(sq);

	/* Release SQ context */
	if (roc_model_is_cn9k())
		rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
	else
		rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);

	/* Sync NDC-NIX-TX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	ndc_req->nix_lf_tx_sync = 1;
	if (mbox_process(mbox))
		rc |= NIX_ERR_NDC_SYNC;

	rc |= nix_tm_sq_flush_post(sq);
	rc |= roc_npa_pool_destroy(sq->aura_handle);
	plt_free(sq->fc);
	plt_free(sq->sqe_mem);
	nix->sqs[qid] = NULL;

	return rc;
}