/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"
static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
{
	return (16UL << (qsize * 2));
}
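/* Worked mapping (assuming the nix_q_size enumerants start at
 * nix_q_size_16): each step quadruples the ring, so
 * nix_q_size_16 -> 16 entries, nix_q_size_64 -> 64, nix_q_size_256 -> 256,
 * and so on up to the largest supported size at nix_q_size_max - 1.
 */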
static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
{
	int i = nix_q_size_16;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}
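/* Usage sketch (illustrative): a request for 1000 entries rounds up to
 * the next supported ring size.
 *
 *	enum nix_q_size qsize = nix_qsize_clampup(1000);
 *	uint32_t nb_desc = nix_qsize_to_val(qsize);	(1024)
 */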
void
nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
{
	uint64_t wait_ns;

	if (!roc_model_is_cn10k())
		return;
	/* Due to HW errata writes to VWQE_FLUSH might hang, so instead
	 * wait for max vwqe timeout interval.
	 */
	if (rq->vwqe_ena) {
		wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
		plt_delay_us((wait_ns / 1E3) + 1);
	}
}
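/* Worked example (values illustrative): with vwqe_wait_tmo = 0x10 and
 * vwqe_interval = 9, the bound above is 0x10 * (9 + 1) * 100 = 16000 ns,
 * so the call delays for 16000 / 1000 + 1 = 17 us.
 */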
int
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
	struct mbox *mbox = dev->mbox;

	/* Pkts will be dropped silently if RQ is disabled */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = rq->qid;
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		aq->rq.ena = enable;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	}

	return mbox_process(mbox);
}
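/* Note on AQ WRITE semantics (sketch): a WRITE instruction updates only
 * those context bits whose mask bits are set, so pairing
 *
 *	aq->rq.ena = enable;
 *	aq->rq_mask.ena = ~(aq->rq_mask.ena);
 *
 * (an all-ones mask for the ENA field, since the request starts out
 * zeroed) flips just the enable bit and leaves the rest of the RQ
 * context untouched.
 */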
int
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	int rc;

	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
	nix_rq_vwqe_flush(rq, nix->vwqe_interval);

	return rc;
}
int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
		bool cfg, bool ena)
{
	struct mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->qid;
	}

	if (rq->ipsech_ena)
		aq->rq.ipsech_ena = 1;

	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
		}
	}

	return 0;
}
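/* Worked example for the tag split above (value illustrative): with
 * rq->tag_mask = 0xAB001234, good_utag and bad_utag both get the top
 * byte 0xAB, and ltag gets the low 24 bits 0x001234.
 */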
int
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
	   bool ena)
{
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = dev->mbox;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = rq->qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

	if (rq->sso_ena) {
		/* SSO mode */
		aq->rq.sso_ena = 1;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.ena_wqwd = 1;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;

		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);

		if (rq->vwqe_ena) {
			aq->rq.vwqe_ena = true;
			aq->rq.vwqe_skip = rq->vwqe_first_skip;
			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
			aq->rq.wqe_aura = rq->vwqe_aura_handle;
		}
	} else {
		/* CQ mode */
		aq->rq.sso_ena = 0;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.cq = rq->qid;
	}

	if (rq->ipsech_ena) {
		aq->rq.ipsech_ena = 1;
		aq->rq.ipsecd_drop_en = 1;
	}

	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
		return -EINVAL;

	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = ena;

	if (rq->spb_ena) {
		uint32_t spb_sizem1;

		aq->rq.spb_ena = 1;
		aq->rq.spb_aura =
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
			return -EINVAL;

		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1;	       /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
	} else {
		aq->rq.spb_ena = 0;
	}

	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;

	/* If RED enabled, then fill enable for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;

		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
	}

	if (cfg) {
		if (rq->sso_ena) {
			/* SSO mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;

			if (rq->vwqe_ena) {
				aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
				aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
				aq->rq_mask.max_vsize_exp =
					~aq->rq_mask.max_vsize_exp;
				aq->rq_mask.vtime_wait =
					~aq->rq_mask.vtime_wait;
				aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
			}
		} else {
			/* CQ mode */
			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.cq = ~aq->rq_mask.cq;
		}

		if (rq->ipsech_ena)
			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

		if (rq->spb_ena) {
			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
			aq->rq_mask.spb_high_sizem1 =
				~aq->rq_mask.spb_high_sizem1;
		}

		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
		aq->rq_mask.ena = ~aq->rq_mask.ena;
		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
		}
	}

	return 0;
}
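/* Worked examples for the CN10K-only fields above (values illustrative):
 * - vwqe_max_sz_exp = 6 requests vectors of 2^6 = 64 packets and is
 *   stored as max_vsize_exp = 4, since HW computes 2^(max_vsize_exp + 2).
 * - spb_size = 4096 bytes gives spb_sizem1 = 4096 / 8 - 1 = 511 = 0x1FF,
 *   stored split as spb_sizem1 = 0x3F (low 6 bits) and
 *   spb_high_sizem1 = 0x7 (high 3 bits).
 */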
int
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	rq->roc_nix = roc_nix;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);

	if (rc)
		return rc;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	return nix_tel_node_add_rq(rq);
}
int
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	int rc;

	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;

	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	rq->roc_nix = roc_nix;

	if (is_cn9k)
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
	else
		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);

	if (rc)
		return rc;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	return nix_tel_node_add_rq(rq);
}
int
roc_nix_rq_fini(struct roc_nix_rq *rq)
{
	/* Disabling RQ is sufficient */
	return roc_nix_rq_ena_dis(rq, false);
}
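/* Usage sketch of the RQ lifecycle (illustrative; "aura" is a placeholder
 * aura handle and error handling is omitted):
 *
 *	struct roc_nix_rq rq = {0};
 *
 *	rq.qid = 0;
 *	rq.aura_handle = aura;
 *	roc_nix_rq_init(roc_nix, &rq, true);   - INIT context and enable
 *	roc_nix_rq_modify(roc_nix, &rq, true); - masked WRITE update
 *	roc_nix_rq_ena_dis(&rq, false);        - runtime disable/enable
 *	roc_nix_rq_fini(&rq);                  - disable is sufficient
 */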
int
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cq_ctx_s *cq_ctx;
	enum nix_q_size qsize;
	size_t desc_sz;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	if (cq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;

	/* CQE of W16 */
	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		cq_ctx = &aq->cq;
	}

	cq_ctx->ena = 1;
	cq_ctx->caching = 1;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);

	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	cq_ctx->cint_idx = cq->qid;

	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
	} else {
		cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
		/* Drop processing or red drop cannot be enabled due to
		 * packets coming for second pass from CPT.
		 */
		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
			cq_ctx->drop = cq->drop_thresh;
			cq_ctx->drop_ena = 1;
		}
	}

	/* TX pause frames enable flow ctrl on RX side */
	if (nix->tx_pause) {
		/* Single BPID is allocated for all rx channels for now */
		cq_ctx->bpid = nix->bpid[0];
		cq_ctx->bp = cq->drop_thresh;
		cq_ctx->bp_ena = 1;
	}

	rc = mbox_process(mbox);
	if (rc)
		goto free_mem;

	return nix_tel_node_add_cq(cq);

free_mem:
	plt_free(cq->desc_base);
fail:
	return rc;
}
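/* Sizing example for roc_nix_cq_init() (illustrative): a request for
 * nb_desc = 1000 is clamped up to the 1K ring, so cq->nb_desc becomes
 * 1024 and cq->qmask 1023; cq->wdata carries the CQ id in bits [63:32]
 * for subsequent NIX_LF_CQ_OP_* accesses.
 */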
int
roc_nix_cq_fini(struct roc_nix_cq *cq)
{
	struct mbox *mbox;
	struct nix *nix;
	int rc;

	if (cq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(cq->roc_nix);
	mbox = (&nix->dev)->mbox;

	/* Disable CQ */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->qidx = cq->qid;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq.ena = 0;
		aq->cq.bp_ena = 0;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	}

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	plt_free(cq->desc_base);
	return 0;
}
static int
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	uint64_t blk_sz;
	uint64_t iova;
	int rc;

	blk_sz = nix->sqb_size;
	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
		sqes_per_sqb = (blk_sz / 8) / 16;
	else
		sqes_per_sqb = (blk_sz / 8) / 8;

	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
	/* Clamp up the SQB count */
	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));

	sq->nb_sqb_bufs = nb_sqb_bufs;
	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		nb_sqb_bufs -
		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;

	/* Explicitly set nat_align alone as by default pool is with both
	 * nat_align and buf_offset = 1 which we don't want for SQB.
	 */
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;

	memset(&aura, 0, sizeof(aura));
	aura.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
		aura.fc_stype = 0x0; /* STF */
	else
		aura.fc_stype = 0x3; /* STSTP */
	aura.fc_addr = (uint64_t)sq->fc;
	aura.fc_hyst_bits = 0; /* Store count on all updates */
	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
				 &pool);
	if (rc)
		goto fail;

	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
	if (sq->sqe_mem == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto nomem;
	}

	/* Fill the initial buffers */
	iova = (uint64_t)sq->sqe_mem;
	for (count = 0; count < NIX_MAX_SQB; count++) {
		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
		iova += blk_sz;
	}
	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
	sq->aura_sqb_bufs = NIX_MAX_SQB;

	return rc;
nomem:
	roc_npa_pool_destroy(sq->aura_handle);
fail:
	return rc;
}
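/* Accounting example (values illustrative): with 4 KB SQBs and W16
 * (128 B) SQEs, sqes_per_sqb = (4096 / 8) / 16 = 32, so 1024 descriptors
 * need 1024 / 32 = 32 SQBs plus NIX_SQB_LIST_SPACE slack, clamped
 * between NIX_DEF_SQB and roc_nix->max_sqb_count.
 */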
static void
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	     uint16_t smq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.smq = smq;
	aq->sq.smq_rr_quantum = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
}
static int
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int64_t count;
	int rc;

	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* READ results are returned in the response, not the request */
	if (rsp->sq.smq_pend)
		plt_err("SQ has pending SQE's");

	count = rsp->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
						 sq->max_sqe_sz));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);

	return 0;
}
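/* Layout note (sketch): SQBs are chained through their last SQE slot,
 * which is why the walk above reads the next-SQB pointer at the offset
 * of the final SQE before returning each used buffer to the aura, and
 * frees the not-yet-used rsp->sq.next_sqb separately.
 */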
static void
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	uint16_t smq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;

	aq->sq.smq = smq;
	aq->sq.smq_rr_weight = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.ena = 1;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	aq->sq.qint_idx = sq->qid % nix->qints;
}
static int
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_rsp *rsp;
	struct nix_cn10k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int64_t count;
	int rc;

	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->qidx = sq->qid;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* READ results are returned in the response, not the request */
	if (rsp->sq.smq_pend)
		plt_err("SQ has pending SQE's");

	count = rsp->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
						 sq->max_sqe_sz));
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);

	return 0;
}
int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid, smq = UINT16_MAX;
	uint32_t rr_quantum = 0;
	int rc;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	qid = sq->qid;
	if (qid >= nix->nb_tx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;

	sq->roc_nix = roc_nix;
	/*
	 * Allocate memory for flow control updates from HW.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (sq->fc == NULL) {
		rc = NIX_ERR_NO_MEM;
		goto fail;
	}

	rc = sqb_pool_populate(roc_nix, sq);
	if (rc)
		goto nomem;

	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
	if (rc) {
		rc = NIX_ERR_TM_LEAF_NODE_GET;
		goto nomem;
	}

	/* Init SQ context */
	if (roc_model_is_cn9k())
		sq_cn9k_init(nix, sq, rr_quantum, smq);
	else
		sq_init(nix, sq, rr_quantum, smq);

	rc = mbox_process(mbox);
	if (rc)
		goto nomem;

	nix->sqs[qid] = sq;
	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute LMT slot for each sq */
	if (roc_model_is_cn9k()) {
		/* Multiple cores/SQ's can use same LMTLINE safely in CN9K */
		sq->lmt_addr = (void *)(nix->lmt_base +
					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
	}

	rc = nix_tel_node_add_sq(sq);
	return rc;
nomem:
	plt_free(sq->fc);
fail:
	return rc;
}
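/* Note on the CN9K LMT mapping above (sketch): each SQ is steered to one
 * of the LMT slots by (qid & RVU_CN9K_LMT_SLOT_MASK) << 12, i.e. slots
 * spaced 4 KB apart in the LMT region, so SQs are spread evenly across
 * the available LMTLINEs.
 */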
int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
	struct nix *nix;
	struct mbox *mbox;
	struct ndc_sync_op *ndc_req;
	uint16_t qid;
	int rc = 0;

	if (sq == NULL)
		return NIX_ERR_PARAM;

	nix = roc_nix_to_nix_priv(sq->roc_nix);
	mbox = (&nix->dev)->mbox;

	qid = sq->qid;

	rc = nix_tm_sq_flush_pre(sq);

	/* Release SQ context */
	if (roc_model_is_cn9k())
		rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
	else
		rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);

	/* Sync NDC-NIX-TX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return -ENOSPC;
	ndc_req->nix_lf_tx_sync = 1;
	if (mbox_process(mbox))
		rc |= NIX_ERR_NDC_SYNC;

	rc |= nix_tm_sq_flush_post(sq);

	/* Restore limit to max SQB count that the pool was created with,
	 * for aura drain to succeed.
	 */
	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
	rc |= roc_npa_pool_destroy(sq->aura_handle);
	plt_free(sq->fc);
	plt_free(sq->sqe_mem);
	nix->sqs[qid] = NULL;

	return rc;
}