/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
	return (16UL << (qsize * 2));
static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
	int i = nix_q_size_16;
	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;
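	/* Worked example (illustrative): nix_qsize_to_val() maps each enum step
	 * to 16 << (2 * qsize), i.e. 16, 64, 256, 1K, 4K, ... entries, so
	 * nix_qsize_clampup(1000) settles on the first size that fits (1024),
	 * while a request larger than the biggest size is clamped to
	 * nix_q_size_max - 1.
	 */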
nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
	if (!roc_model_is_cn10k())
	/* Due to HW errata, writes to VWQE_FLUSH might hang, so instead
	 * wait for the max VWQE timeout interval.
	wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
	plt_delay_us((wait_ns / 1E3) + 1);
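	/* Worked example (illustrative): the wait is vwqe_wait_tmo units of
	 * (vwqe_interval + 1) * 100 ns. Assuming vwqe_wait_tmo = 0x3FF and
	 * vwqe_interval = 9, wait_ns = 1023 * 10 * 100 = ~1.02 ms, and
	 * plt_delay_us() rounds up to the next microsecond.
	 */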
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
	struct mbox *mbox = dev->mbox;
	/* Pkts will be dropped silently if RQ is disabled */
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;
		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
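		/* Note: the AQ WRITE only touches fields whose bits are set in
		 * rq_mask. The request is zero-initialized by the mbox alloc, so
		 * flipping a mask field to all ones (~0), as done below for ena,
		 * selects just that field for update.
		 */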
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
		struct nix_cn10k_aq_enq_req *aq;
		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->ctype = NIX_AQ_CTYPE_RQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->rq_mask.ena = ~(aq->rq_mask.ena);
	return mbox_process(mbox);
roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
	struct mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
		aq->rq.ipsech_ena = 1;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
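	/* Worked example (illustrative): with first_skip = 0, later_skip = 0 and
	 * an lpb_size of 2048 bytes, the alignment check above passes (all
	 * multiples of 8) and lpb_sizem1 becomes 2048 / 8 - 1 = 255, i.e. the
	 * buffer size in dwords, minus one, as the context expects.
	 */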
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;
	/* If RED is enabled, then program the pass/drop levels for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
		aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
		aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
		aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
		aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
		aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
		aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
		aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
		aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
		aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
		aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
		aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		aq->rq_mask.cq = ~aq->rq_mask.cq;
		aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
	aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
	aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
	aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
	aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
	aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
	aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
	aq->rq_mask.ena = ~aq->rq_mask.ena;
	aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
	aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
	aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
	aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
	aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
		aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
		aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
		aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
	struct nix_cn10k_aq_enq_req *aq;
	struct mbox *mbox = dev->mbox;
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
		aq->rq.sso_tt = rq->tt;
		aq->rq.sso_grp = rq->hwgrp;
		aq->rq.wqe_skip = rq->wqe_skip;
		aq->rq.wqe_caching = 1;
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
			aq->rq.vwqe_ena = true;
			aq->rq.vwqe_skip = rq->vwqe_first_skip;
			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
			aq->rq.wqe_aura = rq->vwqe_aura_handle;
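			/* Illustration: per the comment above, the maximum vector size
			 * is 2^(max_vsize_exp + 2) packets, so for a requested maximum
			 * of 64 packets vwqe_max_sz_exp would be 6 and max_vsize_exp is
			 * programmed as 6 - 2 = 4.
			 */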
		aq->rq.good_utag = rq->tag_mask >> 24;
		aq->rq.bad_utag = rq->tag_mask >> 24;
		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
	if (rq->ipsech_ena) {
		aq->rq.ipsech_ena = 1;
		aq->rq.ipsecd_drop_en = 1;
	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
	/* Sizes must be aligned to 8 bytes */
	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
	/* Expressed in number of dwords */
	aq->rq.first_skip = rq->first_skip / 8;
	aq->rq.later_skip = rq->later_skip / 8;
	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);
		if (rq->spb_size & 0x7 ||
		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
		spb_sizem1 -= 1; /* Expressed in size minus one */
		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
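		/* Worked example (illustrative): for an spb_size of 512 bytes,
		 * spb_sizem1 = 512 / 8 - 1 = 63; the low 6 bits (63 & 0x3F) go into
		 * spb_sizem1 and the upper 3 bits ((63 >> 6) & 0x7 = 0) into
		 * spb_high_sizem1, together forming the 9-bit size-minus-one field.
		 */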
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = rq->qid % qints;
	aq->rq.xqe_drop_ena = 1;
	/* If RED is enabled, then program the pass/drop levels for all cases */
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq.spb_pool_pass = rq->spb_red_pass;
		aq->rq.lpb_pool_pass = rq->red_pass;
		aq->rq.wqe_pool_pass = rq->red_pass;
		aq->rq.xqe_pass = rq->red_pass;
		aq->rq.spb_pool_drop = rq->spb_red_drop;
		aq->rq.lpb_pool_drop = rq->red_drop;
		aq->rq.wqe_pool_drop = rq->red_drop;
		aq->rq.xqe_drop = rq->red_drop;
		aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
		aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
		aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
		aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
		aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
		aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
		aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
		aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
		aq->rq_mask.ltag = ~aq->rq_mask.ltag;
			aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
			aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
			aq->rq_mask.max_vsize_exp =
				~aq->rq_mask.max_vsize_exp;
			aq->rq_mask.vtime_wait =
				~aq->rq_mask.vtime_wait;
			aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
		aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
		aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
		aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
		aq->rq_mask.ltag = ~aq->rq_mask.ltag;
		aq->rq_mask.cq = ~aq->rq_mask.cq;
		aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
		aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
		aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
		aq->rq_mask.spb_high_sizem1 =
			~aq->rq_mask.spb_high_sizem1;
	aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
	aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
	aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
	aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
	aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
	aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
	aq->rq_mask.ena = ~aq->rq_mask.ena;
	aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
	aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
	aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
	aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
	aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
		aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
		aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
		aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
		aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;
		aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
		aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
		aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
		aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;
	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;
	rq->roc_nix = roc_nix;
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);
	rc = mbox_process(mbox);
	return nix_tel_node_add_rq(rq);
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	bool is_cn9k = roc_model_is_cn9k();
	struct dev *dev = &nix->dev;
	if (roc_nix == NULL || rq == NULL)
		return NIX_ERR_PARAM;
	if (rq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;
	rq->roc_nix = roc_nix;
		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);
	rc = mbox_process(mbox);
	return nix_tel_node_add_rq(rq);
roc_nix_rq_fini(struct roc_nix_rq *rq)
	/* Disabling RQ is sufficient */
	return roc_nix_rq_ena_dis(rq, false);
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	volatile struct nix_cq_ctx_s *cq_ctx;
	enum nix_q_size qsize;
		return NIX_ERR_PARAM;
	if (cq->qid >= nix->nb_rx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;
	qsize = nix_qsize_clampup(cq->nb_desc);
	cq->nb_desc = nix_qsize_to_val(qsize);
	cq->qmask = cq->nb_desc - 1;
	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	cq->wdata = (uint64_t)cq->qid << 32;
	cq->roc_nix = roc_nix;
	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
	if (cq->desc_base == NULL) {
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;
		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
		struct nix_cn10k_aq_enq_req *aq;
		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_INIT;
	cq_ctx->qsize = qsize;
	cq_ctx->base = (uint64_t)cq->desc_base;
	cq_ctx->avg_level = 0xff;
	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
	/* Many to one reduction */
	cq_ctx->qint_idx = cq->qid % nix->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on, up to a max of 64 IRQs */
	cq_ctx->cint_idx = cq->qid;
	if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;
		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
		cq_ctx->drop = min_rx_drop;
		cq_ctx->drop_ena = 1;
		cq->drop_thresh = min_rx_drop;
		cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
		/* Drop processing or RED drop cannot be enabled due to
		 * packets coming for a second pass from CPT.
		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
			cq_ctx->drop = cq->drop_thresh;
			cq_ctx->drop_ena = 1;
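		/* Illustration: on the CN96xx/CN95xx A0 errata path above, the drop
		 * level is ceil(NIX_CQ_FULL_ERRATA_SKID / nb_desc); e.g. assuming a
		 * skid of 1024 entries and a 4096-entry CQ, min_rx_drop works out to
		 * 1. On other models the fixed NIX_CQ_THRESH_LEVEL is used, and drop
		 * is armed only when inline inbound IPsec is not enabled.
		 */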
		/* TX pause frames enable flow ctrl on RX side */
		/* Single BPID is allocated for all rx channels for now */
		cq_ctx->bpid = nix->bpid[0];
		cq_ctx->bp = cq->drop_thresh;
	rc = mbox_process(mbox);
	return nix_tel_node_add_cq(cq);
	plt_free(cq->desc_base);
roc_nix_cq_fini(struct roc_nix_cq *cq)
		return NIX_ERR_PARAM;
	nix = roc_nix_to_nix_priv(cq->roc_nix);
	mbox = (&nix->dev)->mbox;
	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;
		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
		struct nix_cn10k_aq_enq_req *aq;
		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;
		aq->cq_mask.ena = ~aq->cq_mask.ena;
		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
	rc = mbox_process(mbox);
	plt_free(cq->desc_base);
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
	struct npa_pool_s pool;
	struct npa_aura_s aura;
	blk_sz = nix->sqb_size;
	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
		sqes_per_sqb = (blk_sz / 8) / 16;
		sqes_per_sqb = (blk_sz / 8) / 8;
	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
	/* Clamp up the SQB count */
	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
	sq->nb_sqb_bufs = nb_sqb_bufs;
	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
	sq->nb_sqb_bufs_adj =
		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;
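	/* Worked example (illustrative): assuming a 4 KB SQB and W16 SQEs,
	 * sqes_per_sqb = (4096 / 8) / 16 = 32; for nb_desc = 1024 that is
	 * 1024 / 32 = 32 SQBs plus NIX_SQB_LIST_SPACE, clamped between
	 * NIX_DEF_SQB and max_sqb_count. nb_sqb_bufs_adj is then the count
	 * rounded to whole SQBs and scaled to NIX_SQB_LOWER_THRESH percent.
	 */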
	/* Explicitly set nat_align alone, as by default the pool has both
	 * nat_align and buf_offset = 1, which we don't want for SQB.
	memset(&pool, 0, sizeof(struct npa_pool_s));
	memset(&aura, 0, sizeof(aura));
	if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
		aura.fc_stype = 0x0; /* STF */
		aura.fc_stype = 0x3; /* STSTP */
	aura.fc_addr = (uint64_t)sq->fc;
	aura.fc_hyst_bits = 0; /* Store count on all updates */
	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
	if (sq->sqe_mem == NULL) {
	/* Fill the initial buffers */
	iova = (uint64_t)sq->sqe_mem;
	for (count = 0; count < NIX_MAX_SQB; count++) {
		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
	if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
		plt_err("Failed to free all pointers to the pool");
	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
	sq->aura_sqb_bufs = NIX_MAX_SQB;
	plt_free(sq->sqe_mem);
	roc_npa_pool_destroy(sq->aura_handle);
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_req *aq;
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq_rr_quantum = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
	/* Many to one reduction */
	/* Assign QINT 0 to all the SQs; an errata exists where NIXTX can
	 * send an incorrect QINT_IDX when reporting a queue interrupt (QINT),
	 * which might result in software missing the interrupt.
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	/* Check if the SQ is already cleaned up */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	rc = mbox_process(mbox);
	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
		plt_err("SQ has pending SQEs");
	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are in use */
	sqb_buf = (void *)rsp->sq.head_sqb;
		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
	/* Free the next-to-use SQB */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_req *aq;
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;
	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.max_sqe_size = sq->max_sqe_sz;
	aq->sq.smq_rr_weight = rr_quantum;
	aq->sq.default_chan = nix->tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF;
	aq->sq.sso_ena = !!sq->sso_ena;
	aq->sq.cq_ena = !!sq->cq_ena;
	aq->sq.cq = sq->cqid;
	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		aq->sq.sqe_stype = NIX_STYPE_STP;
	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
	/* Assign QINT 0 to all the SQs; an errata exists where NIXTX can
	 * send an incorrect QINT_IDX when reporting a queue interrupt (QINT),
	 * which might result in software missing the interrupt.
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_cn10k_aq_enq_rsp *rsp;
	struct nix_cn10k_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
	/* Check if the SQ is already cleaned up */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;
	aq->sq_mask.ena = ~aq->sq_mask.ena;
	rc = mbox_process(mbox);
	/* Read SQ and free SQBs */
	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;
	rc = mbox_process_msg(mbox, (void *)&rsp);
		plt_err("SQ has pending SQEs");
	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Free SQBs that are in use */
	sqb_buf = (void *)rsp->sq.head_sqb;
		next_sqb = *(void **)((uintptr_t)sqb_buf +
				      (uint32_t)((sqes_per_sqb - 1) *
		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
	/* Free the next-to-use SQB */
	if (rsp->sq.next_sqb)
		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid, smq = UINT16_MAX;
	uint32_t rr_quantum = 0;
		return NIX_ERR_PARAM;
	if (qid >= nix->nb_tx_queues)
		return NIX_ERR_QUEUE_INVALID_RANGE;
	sq->roc_nix = roc_nix;
	 * Allocate memory for flow control updates from HW.
	 * Alloc one cache line so that it fits all FC_STYPE modes.
	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
	if (sq->fc == NULL) {
	rc = sqb_pool_populate(roc_nix, sq);
	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
		rc = NIX_ERR_TM_LEAF_NODE_GET;
	/* Init SQ context */
	if (roc_model_is_cn9k())
		rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
		rc = sq_init(nix, sq, rr_quantum, smq);
	rc = mbox_process(mbox);
	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute an LMT slot to each SQ */
	if (roc_model_is_cn9k()) {
		/* Multiple cores/SQs can safely use the same LMTLINE in CN9K */
		sq->lmt_addr = (void *)(nix->lmt_base +
					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
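		/* Illustration: each SQ gets its own LMT slot at a 4 KB stride from
		 * lmt_base, selected by (qid & RVU_CN9K_LMT_SLOT_MASK) << 12; e.g.
		 * qid 5 would map to lmt_base + 0x5000, assuming 5 falls within the
		 * slot mask.
		 */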
	rc = nix_tel_node_add_sq(sq);
roc_nix_sq_fini(struct roc_nix_sq *sq)
	struct ndc_sync_op *ndc_req;
		return NIX_ERR_PARAM;
	nix = roc_nix_to_nix_priv(sq->roc_nix);
	mbox = (&nix->dev)->mbox;
	rc = nix_tm_sq_flush_pre(sq);
	/* Release SQ context */
	if (roc_model_is_cn9k())
		rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
		rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
	/* Sync NDC-NIX-TX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
	ndc_req->nix_lf_tx_sync = 1;
	if (mbox_process(mbox))
		rc |= NIX_ERR_NDC_SYNC;
	rc |= nix_tm_sq_flush_post(sq);
	/* Restore the limit to the max SQB count that the pool was created
	 * with, so that the aura drain succeeds.
	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
	rc |= roc_npa_pool_destroy(sq->aura_handle);
	plt_free(sq->sqe_mem);
	nix->sqs[qid] = NULL;
roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	if (head == NULL || tail == NULL)
	reg = (((uint64_t)qid) << 32);
	addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
	val = roc_atomic64_add_nosync(reg, addr);
	    (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
	*tail = (uint32_t)(val & 0xFFFFF);
	*head = (uint32_t)((val >> 20) & 0xFFFFF);
roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_sq *sq = nix->sqs[qid];
	uint16_t sqes_per_sqb, sqb_cnt;
	if (head == NULL || tail == NULL)
	reg = (((uint64_t)qid) << 32);
	addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(reg, addr);
	if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) {
	*tail = (uint32_t)((val >> 28) & 0x3F);
	*head = (uint32_t)((val >> 20) & 0x3F);
	sqb_cnt = (uint16_t)(val & 0xFFFF);
	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
	/* Update tail index as per used sqb count */
	*tail += (sqes_per_sqb * (sqb_cnt - 1));
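	/* Worked example (illustrative): the raw head/tail above are 6-bit SQE
	 * offsets within an SQB, while sqb_cnt is the number of SQBs in use.
	 * With sqes_per_sqb = 32, a raw tail of 4 and sqb_cnt = 3, the returned
	 * tail becomes 4 + 32 * (3 - 1) = 68.
	 */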