/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
        return (16UL << (qsize * 2));
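/* nix_qsize_clampup() picks the smallest queue size enum whose entry count
 * (16 << (2 * qsize)) covers the requested value, clamping to the largest
 * supported size.
 */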
static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
        int i = nix_q_size_16;
        for (; i < nix_q_size_max; i++)
                if (val <= nix_qsize_to_val(i))
        if (i >= nix_q_size_max)
                i = nix_q_size_max - 1;
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
        struct mbox *mbox = dev->mbox;
        /* Pkts will be dropped silently if RQ is disabled */
        if (roc_model_is_cn9k()) {
                struct nix_aq_enq_req *aq;
                aq = mbox_alloc_msg_nix_aq_enq(mbox);
                aq->ctype = NIX_AQ_CTYPE_RQ;
                aq->op = NIX_AQ_INSTOP_WRITE;
                aq->rq_mask.ena = ~(aq->rq_mask.ena);
                struct nix_cn10k_aq_enq_req *aq;
                aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
                aq->ctype = NIX_AQ_CTYPE_RQ;
                aq->op = NIX_AQ_INSTOP_WRITE;
                aq->rq_mask.ena = ~(aq->rq_mask.ena);
        return mbox_process(mbox);
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
        struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
        rc = nix_rq_ena_dis(&nix->dev, rq, enable);
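        /* On cn10k, write the RQ id to the VWQE flush op register so that any
         * pending vector WQEs for this RQ are flushed out.
         */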
        if (roc_model_is_cn10k())
                plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
        struct mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *aq;
        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
                aq->rq.sso_tt = rq->tt;
                aq->rq.sso_grp = rq->hwgrp;
                aq->rq.wqe_skip = rq->wqe_skip;
                aq->rq.wqe_caching = 1;
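                /* The upper 8 bits of tag_mask program the utag fields and
                 * the lower 24 bits program the ltag.
                 */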
                aq->rq.good_utag = rq->tag_mask >> 24;
                aq->rq.bad_utag = rq->tag_mask >> 24;
                aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
                aq->rq.good_utag = rq->tag_mask >> 24;
                aq->rq.bad_utag = rq->tag_mask >> 24;
                aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
                aq->rq.ipsech_ena = 1;
        aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
        /* Sizes must be aligned to 8 bytes */
        if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
        /* Expressed in number of dwords */
        aq->rq.first_skip = rq->first_skip / 8;
        aq->rq.later_skip = rq->later_skip / 8;
        aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
        aq->rq.lpb_sizem1 = rq->lpb_size / 8;
        aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
        aq->rq.rq_int_ena = 0;
        /* Many to one reduction */
        aq->rq.qint_idx = rq->qid % qints;
        aq->rq.xqe_drop_ena = 1;
        /* If RED is enabled, program the pass/drop levels for all cases */
        if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                aq->rq.spb_pool_pass = rq->spb_red_pass;
                aq->rq.lpb_pool_pass = rq->red_pass;
                aq->rq.spb_pool_drop = rq->spb_red_drop;
                aq->rq.lpb_pool_drop = rq->red_drop;
                        aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
                        aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
                        aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
                        aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
                        aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
                        aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
                        aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
                        aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
                        aq->rq_mask.ltag = ~aq->rq_mask.ltag;
                        aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
                        aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
                        aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
                        aq->rq_mask.ltag = ~aq->rq_mask.ltag;
                        aq->rq_mask.cq = ~aq->rq_mask.cq;
                        aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
                aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
                aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
                aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
                aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
                aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
                aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
                aq->rq_mask.ena = ~aq->rq_mask.ena;
                aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
                aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
                aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
                aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
                aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
                if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                        aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
                        aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
                        aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
                        aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
        struct nix_cn10k_aq_enq_req *aq;
        struct mbox *mbox = dev->mbox;
        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
                aq->rq.sso_tt = rq->tt;
                aq->rq.sso_grp = rq->hwgrp;
                aq->rq.wqe_skip = rq->wqe_skip;
                aq->rq.wqe_caching = 1;
                aq->rq.good_utag = rq->tag_mask >> 24;
                aq->rq.bad_utag = rq->tag_mask >> 24;
                aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
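                /* With vector WQE enabled, up to 2^(max_vsize_exp + 2)
                 * packets are aggregated into one vector, or fewer if the
                 * vtime_wait timeout expires first.
                 */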
                        aq->rq.vwqe_ena = true;
                        aq->rq.vwqe_skip = rq->vwqe_first_skip;
                        /* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
                        aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
                        aq->rq.vtime_wait = rq->vwqe_wait_tmo;
                        aq->rq.wqe_aura = rq->vwqe_aura_handle;
                aq->rq.good_utag = rq->tag_mask >> 24;
                aq->rq.bad_utag = rq->tag_mask >> 24;
                aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
        if (rq->ipsech_ena) {
                aq->rq.ipsech_ena = 1;
                aq->rq.ipsecd_drop_en = 1;
        aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
        /* Sizes must be aligned to 8 bytes */
        if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
        /* Expressed in number of dwords */
        aq->rq.first_skip = rq->first_skip / 8;
        aq->rq.later_skip = rq->later_skip / 8;
        aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
        aq->rq.lpb_sizem1 = rq->lpb_size / 8;
        aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
                        roc_npa_aura_handle_to_aura(rq->spb_aura_handle);
                if (rq->spb_size & 0x7 ||
                    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
                spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
                spb_sizem1 -= 1; /* Expressed in size minus one */
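                /* spb_sizem1 is split across two context fields: the low six
                 * bits and the next three bits.
                 */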
                aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
                aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
        aq->rq.rq_int_ena = 0;
        /* Many to one reduction */
        aq->rq.qint_idx = rq->qid % qints;
        aq->rq.xqe_drop_ena = 1;
        /* If RED is enabled, program the pass/drop levels for all cases */
        if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                aq->rq.spb_pool_pass = rq->spb_red_pass;
                aq->rq.lpb_pool_pass = rq->red_pass;
                aq->rq.wqe_pool_pass = rq->red_pass;
                aq->rq.xqe_pass = rq->red_pass;
                aq->rq.spb_pool_drop = rq->spb_red_drop;
                aq->rq.lpb_pool_drop = rq->red_drop;
                aq->rq.wqe_pool_drop = rq->red_drop;
                aq->rq.xqe_drop = rq->red_drop;
                        aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
                        aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
                        aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
                        aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
                        aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
                        aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
                        aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
                        aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
                        aq->rq_mask.ltag = ~aq->rq_mask.ltag;
                                aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
                                aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
                                aq->rq_mask.max_vsize_exp =
                                        ~aq->rq_mask.max_vsize_exp;
                                aq->rq_mask.vtime_wait =
                                        ~aq->rq_mask.vtime_wait;
                                aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
                        aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
                        aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
                        aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
                        aq->rq_mask.ltag = ~aq->rq_mask.ltag;
                        aq->rq_mask.cq = ~aq->rq_mask.cq;
                        aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
                        aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
                        aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
                        aq->rq_mask.spb_high_sizem1 =
                                ~aq->rq_mask.spb_high_sizem1;
                aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
                aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
                aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
                aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
                aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
                aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
                aq->rq_mask.ena = ~aq->rq_mask.ena;
                aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
                aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
                aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
                aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
                aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
                if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                        aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
                        aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
                        aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
                        aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;
                        aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
                        aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
                        aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
                        aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        bool is_cn9k = roc_model_is_cn9k();
        struct dev *dev = &nix->dev;
        if (roc_nix == NULL || rq == NULL)
                return NIX_ERR_PARAM;
        if (rq->qid >= nix->nb_rx_queues)
                return NIX_ERR_QUEUE_INVALID_RANGE;
        rq->roc_nix = roc_nix;
                rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
                rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);
        rc = mbox_process(mbox);
        return nix_tel_node_add_rq(rq);
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        bool is_cn9k = roc_model_is_cn9k();
        struct dev *dev = &nix->dev;
        if (roc_nix == NULL || rq == NULL)
                return NIX_ERR_PARAM;
        if (rq->qid >= nix->nb_rx_queues)
                return NIX_ERR_QUEUE_INVALID_RANGE;
        rq->roc_nix = roc_nix;
                rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
                rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);
        rc = mbox_process(mbox);
        return nix_tel_node_add_rq(rq);
roc_nix_rq_fini(struct roc_nix_rq *rq)
        /* Disabling RQ is sufficient */
        return roc_nix_rq_ena_dis(rq, false);
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        volatile struct nix_cq_ctx_s *cq_ctx;
        enum nix_q_size qsize;
                return NIX_ERR_PARAM;
        if (cq->qid >= nix->nb_rx_queues)
                return NIX_ERR_QUEUE_INVALID_RANGE;
        qsize = nix_qsize_clampup(cq->nb_desc);
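        /* nb_desc is rounded up to the next supported queue size, which is a
         * power of two, so nb_desc - 1 works as the ring index mask.
         */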
        cq->nb_desc = nix_qsize_to_val(qsize);
        cq->qmask = cq->nb_desc - 1;
        cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
        cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
        cq->wdata = (uint64_t)cq->qid << 32;
        cq->roc_nix = roc_nix;
        desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
        cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
        if (cq->desc_base == NULL) {
        if (roc_model_is_cn9k()) {
                struct nix_aq_enq_req *aq;
                aq = mbox_alloc_msg_nix_aq_enq(mbox);
                aq->ctype = NIX_AQ_CTYPE_CQ;
                aq->op = NIX_AQ_INSTOP_INIT;
                struct nix_cn10k_aq_enq_req *aq;
                aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
                aq->ctype = NIX_AQ_CTYPE_CQ;
                aq->op = NIX_AQ_INSTOP_INIT;
        cq_ctx->qsize = qsize;
        cq_ctx->base = (uint64_t)cq->desc_base;
        cq_ctx->avg_level = 0xff;
        cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
        cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
        /* Many to one reduction */
        cq_ctx->qint_idx = cq->qid % nix->qints;
        /* Map CQ0 [RQ0] to CINT0 and so on, up to a maximum of 64 IRQs */
        cq_ctx->cint_idx = cq->qid;
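        /* Workaround for the CQ-full errata on cn96 A0 / cn95 A0: always
         * program a minimum drop level derived from the skid value.
         */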
        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
                const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
                uint16_t min_rx_drop;
                min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
                cq_ctx->drop = min_rx_drop;
                cq_ctx->drop_ena = 1;
                cq->drop_thresh = min_rx_drop;
                cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
                /* Drop processing or RED drop cannot be enabled due to
                 * packets coming in for a second pass from CPT.
                 */
                if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
                        cq_ctx->drop = cq->drop_thresh;
                        cq_ctx->drop_ena = 1;
        /* TX pause frames enable flow ctrl on RX side */
                /* Single BPID is allocated for all rx channels for now */
                cq_ctx->bpid = nix->bpid[0];
                cq_ctx->bp = cq->drop_thresh;
        rc = mbox_process(mbox);
        return nix_tel_node_add_cq(cq);
        plt_free(cq->desc_base);
roc_nix_cq_fini(struct roc_nix_cq *cq)
                return NIX_ERR_PARAM;
        nix = roc_nix_to_nix_priv(cq->roc_nix);
        mbox = (&nix->dev)->mbox;
        if (roc_model_is_cn9k()) {
                struct nix_aq_enq_req *aq;
                aq = mbox_alloc_msg_nix_aq_enq(mbox);
                aq->ctype = NIX_AQ_CTYPE_CQ;
                aq->op = NIX_AQ_INSTOP_WRITE;
                aq->cq_mask.ena = ~aq->cq_mask.ena;
                aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
                struct nix_cn10k_aq_enq_req *aq;
                aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
                aq->ctype = NIX_AQ_CTYPE_CQ;
                aq->op = NIX_AQ_INSTOP_WRITE;
                aq->cq_mask.ena = ~aq->cq_mask.ena;
                aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
        rc = mbox_process(mbox);
        plt_free(cq->desc_base);
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint16_t sqes_per_sqb, count, nb_sqb_bufs;
        struct npa_pool_s pool;
        struct npa_aura_s aura;
        blk_sz = nix->sqb_size;
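        /* An SQB holds blk_sz / 8 dwords; a W16 SQE takes 16 dwords and a W8
         * SQE takes 8, which gives the SQEs-per-SQB count below.
         */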
        if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
                sqes_per_sqb = (blk_sz / 8) / 16;
                sqes_per_sqb = (blk_sz / 8) / 8;
        sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
        nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
        nb_sqb_bufs += NIX_SQB_LIST_SPACE;
        /* Clamp up the SQB count */
        nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
                              (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
        sq->nb_sqb_bufs = nb_sqb_bufs;
        sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
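        /* Adjust the SQB count and keep only NIX_SQB_LOWER_THRESH percent of
         * it as nb_sqb_bufs_adj.
         */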
        sq->nb_sqb_bufs_adj =
                (PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
        sq->nb_sqb_bufs_adj =
                (sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;
        /* Explicitly set nat_align alone; by default the pool is created with
         * both nat_align and buf_offset = 1, which we don't want for SQBs.
         */
        memset(&pool, 0, sizeof(struct npa_pool_s));
        memset(&aura, 0, sizeof(aura));
        if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
                aura.fc_stype = 0x0; /* STF */
                aura.fc_stype = 0x3; /* STSTP */
        aura.fc_addr = (uint64_t)sq->fc;
        aura.fc_hyst_bits = 0; /* Store count on all updates */
        rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
        sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
        if (sq->sqe_mem == NULL) {
        /* Fill the initial buffers */
        iova = (uint64_t)sq->sqe_mem;
        for (count = 0; count < NIX_MAX_SQB; count++) {
                roc_npa_aura_op_free(sq->aura_handle, 0, iova);
        roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
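        /* The pool is seeded with NIX_MAX_SQB buffers, but the usable aura
         * limit is clamped to the computed nb_sqb_bufs.
         */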
        roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
        sq->aura_sqb_bufs = NIX_MAX_SQB;
        roc_npa_pool_destroy(sq->aura_handle);
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_aq_enq_req *aq;
        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_INIT;
        aq->sq.max_sqe_size = sq->max_sqe_sz;
        aq->sq.max_sqe_size = sq->max_sqe_sz;
        aq->sq.smq_rr_quantum = rr_quantum;
        aq->sq.default_chan = nix->tx_chan_base;
        aq->sq.sqe_stype = NIX_STYPE_STF;
        aq->sq.sso_ena = !!sq->sso_ena;
        aq->sq.cq_ena = !!sq->cq_ena;
        aq->sq.cq = sq->cqid;
        if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
                aq->sq.sqe_stype = NIX_STYPE_STP;
        aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
        aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
        /* Many to one reduction */
        aq->sq.qint_idx = sq->qid % nix->qints;
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_aq_enq_rsp *rsp;
        struct nix_aq_enq_req *aq;
        uint16_t sqes_per_sqb;
        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_READ;
        rc = mbox_process_msg(mbox, (void *)&rsp);
        /* Check if sq is already cleaned up */
        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_WRITE;
        aq->sq_mask.ena = ~aq->sq_mask.ena;
        rc = mbox_process(mbox);
        /* Read SQ and free SQBs */
        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_READ;
        rc = mbox_process_msg(mbox, (void *)&rsp);
                plt_err("SQ has pending SQEs");
        count = aq->sq.sqb_count;
        sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
        /* Free SQBs that are in use */
        sqb_buf = (void *)rsp->sq.head_sqb;
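        /* Each SQB stores the pointer to the next SQB at the start of its
         * last SQE slot; walk the chain and return every used SQB to the
         * aura.
         */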
                next_sqb = *(void **)((uintptr_t)sqb_buf +
                                      (uint32_t)((sqes_per_sqb - 1) *
                roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
        /* Free the next-to-use SQB */
        if (rsp->sq.next_sqb)
                roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_cn10k_aq_enq_req *aq;
        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_INIT;
        aq->sq.max_sqe_size = sq->max_sqe_sz;
        aq->sq.max_sqe_size = sq->max_sqe_sz;
        aq->sq.smq_rr_weight = rr_quantum;
        aq->sq.default_chan = nix->tx_chan_base;
        aq->sq.sqe_stype = NIX_STYPE_STF;
        aq->sq.sso_ena = !!sq->sso_ena;
        aq->sq.cq_ena = !!sq->cq_ena;
        aq->sq.cq = sq->cqid;
        if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
                aq->sq.sqe_stype = NIX_STYPE_STP;
        aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
        aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
        /* Many to one reduction */
        aq->sq.qint_idx = sq->qid % nix->qints;
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_cn10k_aq_enq_rsp *rsp;
        struct nix_cn10k_aq_enq_req *aq;
        uint16_t sqes_per_sqb;
        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_READ;
        rc = mbox_process_msg(mbox, (void *)&rsp);
        /* Check if sq is already cleaned up */
        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_WRITE;
        aq->sq_mask.ena = ~aq->sq_mask.ena;
        rc = mbox_process(mbox);
        /* Read SQ and free SQBs */
        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_READ;
        rc = mbox_process_msg(mbox, (void *)&rsp);
                plt_err("SQ has pending SQEs");
        count = aq->sq.sqb_count;
        sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
        /* Free SQBs that are in use */
        sqb_buf = (void *)rsp->sq.head_sqb;
                next_sqb = *(void **)((uintptr_t)sqb_buf +
                                      (uint32_t)((sqes_per_sqb - 1) *
                roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
        /* Free the next-to-use SQB */
        if (rsp->sq.next_sqb)
                roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        uint16_t qid, smq = UINT16_MAX;
        uint32_t rr_quantum = 0;
                return NIX_ERR_PARAM;
        if (qid >= nix->nb_tx_queues)
                return NIX_ERR_QUEUE_INVALID_RANGE;
        sq->roc_nix = roc_nix;
         * Allocate memory for flow control updates from HW.
         * Allocate one cache line so that it fits all FC_STYPE modes.
        sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
        if (sq->fc == NULL) {
        rc = sqb_pool_populate(roc_nix, sq);
        rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
                rc = NIX_ERR_TM_LEAF_NODE_GET;
        /* Init SQ context */
        if (roc_model_is_cn9k())
                sq_cn9k_init(nix, sq, rr_quantum, smq);
                sq_init(nix, sq, rr_quantum, smq);
        rc = mbox_process(mbox);
        sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
        /* Evenly distribute LMT slots across SQs */
        if (roc_model_is_cn9k()) {
                /* Multiple cores/SQs can safely use the same LMTLINE in CN9K */
                sq->lmt_addr = (void *)(nix->lmt_base +
                                        ((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
        rc = nix_tel_node_add_sq(sq);
roc_nix_sq_fini(struct roc_nix_sq *sq)
        struct ndc_sync_op *ndc_req;
                return NIX_ERR_PARAM;
        nix = roc_nix_to_nix_priv(sq->roc_nix);
        mbox = (&nix->dev)->mbox;
        rc = nix_tm_sq_flush_pre(sq);
        /* Release SQ context */
        if (roc_model_is_cn9k())
                rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
                rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
        /* Sync NDC-NIX-TX for LF */
        ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
        ndc_req->nix_lf_tx_sync = 1;
        if (mbox_process(mbox))
                rc |= NIX_ERR_NDC_SYNC;
        rc |= nix_tm_sq_flush_post(sq);
        /* Restore the limit to the max SQB count that the pool was created
         * with, so that the aura drain succeeds.
         */
        roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
        rc |= roc_npa_pool_destroy(sq->aura_handle);
        plt_free(sq->sqe_mem);
        nix->sqs[qid] = NULL;