common/cnxk: change NIX debug API and queue API interface
drivers/common/cnxk/roc_nix_queue.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <math.h>

#include "roc_api.h"
#include "roc_priv.h"

static inline uint32_t
nix_qsize_to_val(enum nix_q_size qsize)
{
        return (16UL << (qsize * 2));
}

static inline enum nix_q_size
nix_qsize_clampup(uint32_t val)
{
        int i = nix_q_size_16;

        for (; i < nix_q_size_max; i++)
                if (val <= nix_qsize_to_val(i))
                        break;

        if (i >= nix_q_size_max)
                i = nix_q_size_max - 1;

        return i;
}
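
/*
 * Worked example (illustrative): with nix_q_size_16 as the first enum
 * value, nix_qsize_to_val() yields the power-of-four series
 * 16, 64, 256, 1024, ..., and nix_qsize_clampup() rounds a requested
 * depth up to the nearest supported size:
 *
 *      nix_qsize_clampup(100);  // -> enum for 256 entries
 *      nix_qsize_clampup(1024); // -> enum for 1024 entries (exact fit)
 *      nix_qsize_clampup(~0U);  // -> nix_q_size_max - 1 (clamped)
 */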

int
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
        struct mbox *mbox = dev->mbox;

        /* Pkts will be dropped silently if RQ is disabled */
        if (roc_model_is_cn9k()) {
                struct nix_aq_enq_req *aq;

                aq = mbox_alloc_msg_nix_aq_enq(mbox);
                aq->qidx = rq->qid;
                aq->ctype = NIX_AQ_CTYPE_RQ;
                aq->op = NIX_AQ_INSTOP_WRITE;

                aq->rq.ena = enable;
                aq->rq_mask.ena = ~(aq->rq_mask.ena);
        } else {
                struct nix_cn10k_aq_enq_req *aq;

                aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
                aq->qidx = rq->qid;
                aq->ctype = NIX_AQ_CTYPE_RQ;
                aq->op = NIX_AQ_INSTOP_WRITE;

                aq->rq.ena = enable;
                aq->rq_mask.ena = ~(aq->rq_mask.ena);
        }

        return mbox_process(mbox);
}
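
/*
 * Note on the AQ write-mask idiom used above and throughout this file:
 * a freshly allocated mbox request is zeroed (which this pattern relies
 * on), so every rq_mask/sq_mask/cq_mask field starts out as 0 and
 * `mask = ~mask` flips it to all-ones, i.e. "update this field".
 * A minimal sketch, under the same zeroed-request assumption:
 *
 *      aq->rq.ena = 1;                       // new field value
 *      aq->rq_mask.ena = ~(aq->rq_mask.ena); // 0 -> all-ones: apply it
 */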

int
nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
                bool cfg, bool ena)
{
        struct mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *aq;

        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = rq->qid;
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

        if (rq->sso_ena) {
                /* SSO mode */
                aq->rq.sso_ena = 1;
                aq->rq.sso_tt = rq->tt;
                aq->rq.sso_grp = rq->hwgrp;
                aq->rq.ena_wqwd = 1;
                aq->rq.wqe_skip = rq->wqe_skip;
                aq->rq.wqe_caching = 1;

                aq->rq.good_utag = rq->tag_mask >> 24;
                aq->rq.bad_utag = rq->tag_mask >> 24;
                aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
        } else {
                /* CQ mode */
                aq->rq.sso_ena = 0;
                aq->rq.good_utag = rq->tag_mask >> 24;
                aq->rq.bad_utag = rq->tag_mask >> 24;
                aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
                aq->rq.cq = rq->qid;
        }

        if (rq->ipsech_ena)
                aq->rq.ipsech_ena = 1;

        aq->rq.spb_ena = 0;
        aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

        /* Sizes must be aligned to 8 bytes */
        if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
                return -EINVAL;

        /* Expressed in number of dwords */
        aq->rq.first_skip = rq->first_skip / 8;
        aq->rq.later_skip = rq->later_skip / 8;
        aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
        aq->rq.lpb_sizem1 = rq->lpb_size / 8;
        aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
        aq->rq.ena = ena;
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
        aq->rq.rq_int_ena = 0;
        /* Many to one reduction */
        aq->rq.qint_idx = rq->qid % qints;
        aq->rq.xqe_drop_ena = 1;

        /* If RED enabled, then fill enable for all cases */
        if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                aq->rq.spb_aura_pass = rq->spb_red_pass;
                aq->rq.lpb_aura_pass = rq->red_pass;

                aq->rq.spb_aura_drop = rq->spb_red_drop;
                aq->rq.lpb_aura_drop = rq->red_drop;
        }

        if (cfg) {
                if (rq->sso_ena) {
                        /* SSO mode */
                        aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
                        aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
                        aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
                        aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
                        aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
                        aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
                        aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
                        aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
                        aq->rq_mask.ltag = ~aq->rq_mask.ltag;
                } else {
                        /* CQ mode */
                        aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
                        aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
                        aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
                        aq->rq_mask.ltag = ~aq->rq_mask.ltag;
                        aq->rq_mask.cq = ~aq->rq_mask.cq;
                }

                if (rq->ipsech_ena)
                        aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

                aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
                aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
                aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
                aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
                aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
                aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
                aq->rq_mask.ena = ~aq->rq_mask.ena;
                aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
                aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
                aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
                aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
                aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

                if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                        aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
                        aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;

                        aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
                        aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
                }
        }

        return 0;
}
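
/*
 * Worked example of the dword encodings above (illustrative numbers):
 * for first_skip = 128 B, later_skip = 64 B and lpb_size = 2048 B (all
 * 8 B aligned, as enforced above), the context fields become:
 *
 *      aq->rq.first_skip = 128 / 8 = 16;        // dwords
 *      aq->rq.later_skip = 64 / 8 = 8;          // dwords
 *      aq->rq.lpb_sizem1 = 2048 / 8 - 1 = 255;  // dwords, minus one
 */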

int
nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
           bool ena)
{
        struct nix_cn10k_aq_enq_req *aq;
        struct mbox *mbox = dev->mbox;

        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->qidx = rq->qid;
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;

        if (rq->sso_ena) {
                /* SSO mode */
                aq->rq.sso_ena = 1;
                aq->rq.sso_tt = rq->tt;
                aq->rq.sso_grp = rq->hwgrp;
                aq->rq.ena_wqwd = 1;
                aq->rq.wqe_skip = rq->wqe_skip;
                aq->rq.wqe_caching = 1;

                aq->rq.good_utag = rq->tag_mask >> 24;
                aq->rq.bad_utag = rq->tag_mask >> 24;
                aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);

                if (rq->vwqe_ena) {
                        aq->rq.vwqe_ena = true;
                        aq->rq.vwqe_skip = rq->vwqe_first_skip;
                        /* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
                        aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
                        aq->rq.vtime_wait = rq->vwqe_wait_tmo;
                        aq->rq.wqe_aura = rq->vwqe_aura_handle;
                }
        } else {
                /* CQ mode */
                aq->rq.sso_ena = 0;
                aq->rq.good_utag = rq->tag_mask >> 24;
                aq->rq.bad_utag = rq->tag_mask >> 24;
                aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
                aq->rq.cq = rq->qid;
        }

        if (rq->ipsech_ena) {
                aq->rq.ipsech_ena = 1;
                aq->rq.ipsecd_drop_en = 1;
        }

        aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);

        /* Sizes must be aligned to 8 bytes */
        if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
                return -EINVAL;

        /* Expressed in number of dwords */
        aq->rq.first_skip = rq->first_skip / 8;
        aq->rq.later_skip = rq->later_skip / 8;
        aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
        aq->rq.lpb_sizem1 = rq->lpb_size / 8;
        aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
        aq->rq.ena = ena;

        if (rq->spb_ena) {
                uint32_t spb_sizem1;

                aq->rq.spb_ena = 1;
                aq->rq.spb_aura =
                        roc_npa_aura_handle_to_aura(rq->spb_aura_handle);

                if (rq->spb_size & 0x7 ||
                    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
                        return -EINVAL;

                spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
                spb_sizem1 -= 1;               /* Expressed in size minus one */
                aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
                aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
        } else {
                aq->rq.spb_ena = 0;
        }

        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
        aq->rq.rq_int_ena = 0;
        /* Many to one reduction */
        aq->rq.qint_idx = rq->qid % qints;
        aq->rq.xqe_drop_ena = 1;

        /* If RED enabled, then fill enable for all cases */
        if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                aq->rq.spb_pool_pass = rq->red_pass;
                aq->rq.spb_aura_pass = rq->red_pass;
                aq->rq.lpb_pool_pass = rq->red_pass;
                aq->rq.lpb_aura_pass = rq->red_pass;
                aq->rq.wqe_pool_pass = rq->red_pass;
                aq->rq.xqe_pass = rq->red_pass;

                aq->rq.spb_pool_drop = rq->red_drop;
                aq->rq.spb_aura_drop = rq->red_drop;
                aq->rq.lpb_pool_drop = rq->red_drop;
                aq->rq.lpb_aura_drop = rq->red_drop;
                aq->rq.wqe_pool_drop = rq->red_drop;
                aq->rq.xqe_drop = rq->red_drop;
        }

        if (cfg) {
                if (rq->sso_ena) {
                        /* SSO mode */
                        aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
                        aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
                        aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
                        aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
                        aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
                        aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
                        aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
                        aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
                        aq->rq_mask.ltag = ~aq->rq_mask.ltag;
                        if (rq->vwqe_ena) {
                                aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
                                aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
                                aq->rq_mask.max_vsize_exp =
                                        ~aq->rq_mask.max_vsize_exp;
                                aq->rq_mask.vtime_wait =
                                        ~aq->rq_mask.vtime_wait;
                                aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
                        }
                } else {
                        /* CQ mode */
                        aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
                        aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
                        aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
                        aq->rq_mask.ltag = ~aq->rq_mask.ltag;
                        aq->rq_mask.cq = ~aq->rq_mask.cq;
                }

                if (rq->ipsech_ena)
                        aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;

                if (rq->spb_ena) {
                        aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
                        aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
                        aq->rq_mask.spb_high_sizem1 =
                                ~aq->rq_mask.spb_high_sizem1;
                }

                aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
                aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
                aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
                aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
                aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
                aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
                aq->rq_mask.ena = ~aq->rq_mask.ena;
                aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
                aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
                aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
                aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
                aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;

                if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
                        aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
                        aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
                        aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
                        aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
                        aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
                        aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;

                        aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
                        aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
                        aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
                        aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
                        aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
                        aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
                }
        }

        return 0;
}
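
/*
 * Worked example of the VWQE vector size encoding above (illustrative):
 * hardware interprets max_vsize_exp as a maximum vector size of
 * 2^(max_vsize_exp + 2) packets.  To cap vectors at 64 packets the
 * caller passes rq->vwqe_max_sz_exp = 6 (2^6 = 64), and the programmed
 * value is 6 - 2 = 4, since 2^(4 + 2) = 64.
 */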

int
roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
        bool is_cn9k = roc_model_is_cn9k();
        struct mbox *mbox;
        struct nix *nix;
        struct dev *dev;
        int rc;

        if (roc_nix == NULL || rq == NULL)
                return NIX_ERR_PARAM;

        nix = roc_nix_to_nix_priv(roc_nix);
        mbox = (&nix->dev)->mbox;
        dev = &nix->dev;

        if (rq->qid >= nix->nb_rx_queues)
                return NIX_ERR_QUEUE_INVALID_RANGE;

        rq->roc_nix = roc_nix;

        if (is_cn9k)
                rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
        else
                rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);

        if (rc)
                return rc;

        return mbox_process(mbox);
}

int
roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
{
        bool is_cn9k = roc_model_is_cn9k();
        struct mbox *mbox;
        struct nix *nix;
        struct dev *dev;
        int rc;

        if (roc_nix == NULL || rq == NULL)
                return NIX_ERR_PARAM;

        nix = roc_nix_to_nix_priv(roc_nix);
        mbox = (&nix->dev)->mbox;
        dev = &nix->dev;

        if (rq->qid >= nix->nb_rx_queues)
                return NIX_ERR_QUEUE_INVALID_RANGE;

        rq->roc_nix = roc_nix;

        if (is_cn9k)
                rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
        else
                rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);

        if (rc)
                return rc;

        return mbox_process(mbox);
}

int
roc_nix_rq_fini(struct roc_nix_rq *rq)
{
        /* Disabling RQ is sufficient */
        return roc_nix_rq_ena_dis(rq, false);
}
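
/*
 * Usage sketch for the RQ API above (illustrative; error handling
 * trimmed, and the roc_nix pointer plus lpb_aura handle are assumed to
 * come from earlier NIX/NPA setup):
 *
 *      struct roc_nix_rq rq = {0};
 *
 *      rq.qid = 0;
 *      rq.aura_handle = lpb_aura;     // NPA aura backing pkt buffers
 *      rq.first_skip = 128;           // bytes, must be 8 B aligned
 *      rq.lpb_size = 2048;            // bytes, must be 8 B aligned
 *      roc_nix_rq_init(roc_nix, &rq, true);   // create and enable
 *
 *      roc_nix_rq_ena_dis(&rq, false);        // pause Rx traffic
 *      roc_nix_rq_modify(roc_nix, &rq, true); // rewrite context
 *      roc_nix_rq_fini(&rq);                  // disable on teardown
 */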

int
roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        volatile struct nix_cq_ctx_s *cq_ctx;
        enum nix_q_size qsize;
        size_t desc_sz;
        int rc;

        if (cq == NULL)
                return NIX_ERR_PARAM;

        if (cq->qid >= nix->nb_rx_queues)
                return NIX_ERR_QUEUE_INVALID_RANGE;

        qsize = nix_qsize_clampup(cq->nb_desc);
        cq->nb_desc = nix_qsize_to_val(qsize);
        cq->qmask = cq->nb_desc - 1;
        cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
        cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
        cq->wdata = (uint64_t)cq->qid << 32;
        cq->roc_nix = roc_nix;

        /* CQE of W16 */
        desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
        cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
        if (cq->desc_base == NULL) {
                rc = NIX_ERR_NO_MEM;
                goto fail;
        }

        if (roc_model_is_cn9k()) {
                struct nix_aq_enq_req *aq;

                aq = mbox_alloc_msg_nix_aq_enq(mbox);
                aq->qidx = cq->qid;
                aq->ctype = NIX_AQ_CTYPE_CQ;
                aq->op = NIX_AQ_INSTOP_INIT;
                cq_ctx = &aq->cq;
        } else {
                struct nix_cn10k_aq_enq_req *aq;

                aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
                aq->qidx = cq->qid;
                aq->ctype = NIX_AQ_CTYPE_CQ;
                aq->op = NIX_AQ_INSTOP_INIT;
                cq_ctx = &aq->cq;
        }

        cq_ctx->ena = 1;
        cq_ctx->caching = 1;
        cq_ctx->qsize = qsize;
        cq_ctx->base = (uint64_t)cq->desc_base;
        cq_ctx->avg_level = 0xff;
        cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
        cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);

        /* Many to one reduction */
        cq_ctx->qint_idx = cq->qid % nix->qints;
        /* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
        cq_ctx->cint_idx = cq->qid;

        if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
                const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
                uint16_t min_rx_drop;

                min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
                cq_ctx->drop = min_rx_drop;
                cq_ctx->drop_ena = 1;
                cq->drop_thresh = min_rx_drop;
        } else {
                cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
                cq_ctx->drop = cq->drop_thresh;
                cq_ctx->drop_ena = 1;
        }

        /* TX pause frames enable flow ctrl on RX side */
        if (nix->tx_pause) {
                /* Single BPID is allocated for all rx channels for now */
                cq_ctx->bpid = nix->bpid[0];
                cq_ctx->bp = cq_ctx->drop;
                cq_ctx->bp_ena = 1;
        }

        rc = mbox_process(mbox);
        if (rc)
                goto free_mem;

        return 0;

free_mem:
        plt_free(cq->desc_base);
fail:
        return rc;
}

int
roc_nix_cq_fini(struct roc_nix_cq *cq)
{
        struct mbox *mbox;
        struct nix *nix;
        int rc;

        if (cq == NULL)
                return NIX_ERR_PARAM;

        nix = roc_nix_to_nix_priv(cq->roc_nix);
        mbox = (&nix->dev)->mbox;

        /* Disable CQ */
        if (roc_model_is_cn9k()) {
                struct nix_aq_enq_req *aq;

                aq = mbox_alloc_msg_nix_aq_enq(mbox);
                aq->qidx = cq->qid;
                aq->ctype = NIX_AQ_CTYPE_CQ;
                aq->op = NIX_AQ_INSTOP_WRITE;
                aq->cq.ena = 0;
                aq->cq.bp_ena = 0;
                aq->cq_mask.ena = ~aq->cq_mask.ena;
                aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
        } else {
                struct nix_cn10k_aq_enq_req *aq;

                aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
                aq->qidx = cq->qid;
                aq->ctype = NIX_AQ_CTYPE_CQ;
                aq->op = NIX_AQ_INSTOP_WRITE;
                aq->cq.ena = 0;
                aq->cq.bp_ena = 0;
                aq->cq_mask.ena = ~aq->cq_mask.ena;
                aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
        }

        rc = mbox_process(mbox);
        if (rc)
                return rc;

        plt_free(cq->desc_base);
        return 0;
}
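
/*
 * Usage sketch for the CQ API above (illustrative): nb_desc is rounded
 * up to the nearest supported queue size, so callers should re-read it
 * after init.
 *
 *      struct roc_nix_cq cq = {0};
 *
 *      cq.qid = 0;
 *      cq.nb_desc = 1000;
 *      roc_nix_cq_init(roc_nix, &cq); // cq.nb_desc is now 1024
 *
 *      roc_nix_cq_fini(&cq);          // disable CQ and free descriptors
 */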

static int
sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        uint16_t sqes_per_sqb, count, nb_sqb_bufs;
        struct npa_pool_s pool;
        struct npa_aura_s aura;
        uint64_t blk_sz;
        uint64_t iova;
        int rc;

        blk_sz = nix->sqb_size;
        if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
                sqes_per_sqb = (blk_sz / 8) / 16;
        else
                sqes_per_sqb = (blk_sz / 8) / 8;

        sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
        nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
        nb_sqb_bufs += NIX_SQB_LIST_SPACE;
        /* Clamp up the SQB count */
        nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
                              (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));

        sq->nb_sqb_bufs = nb_sqb_bufs;
        sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
        sq->nb_sqb_bufs_adj =
                nb_sqb_bufs -
                (PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
        sq->nb_sqb_bufs_adj =
                (sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;

        /* Explicitly set nat_align alone; by default the pool is created
         * with both nat_align and buf_offset = 1, which we don't want
         * for SQB.
         */
        memset(&pool, 0, sizeof(struct npa_pool_s));
        pool.nat_align = 1;

        memset(&aura, 0, sizeof(aura));
        aura.fc_ena = 1;
        if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
                aura.fc_stype = 0x0; /* STF */
        else
                aura.fc_stype = 0x3; /* STSTP */
        aura.fc_addr = (uint64_t)sq->fc;
        aura.fc_hyst_bits = 0; /* Store count on all updates */
        rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
                                 &pool);
        if (rc)
                goto fail;

        sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
        if (sq->sqe_mem == NULL) {
                rc = NIX_ERR_NO_MEM;
                goto nomem;
        }

        /* Fill the initial buffers */
        iova = (uint64_t)sq->sqe_mem;
        for (count = 0; count < NIX_MAX_SQB; count++) {
                roc_npa_aura_op_free(sq->aura_handle, 0, iova);
                iova += blk_sz;
        }
        roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
        roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
        sq->aura_sqb_bufs = NIX_MAX_SQB;

        return rc;
nomem:
        roc_npa_pool_destroy(sq->aura_handle);
fail:
        return rc;
}
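
/*
 * Worked example of the SQB sizing above (illustrative, assuming a
 * 4 KB SQB with W16 SQEs): blk_sz / 8 = 512 dwords per SQB, so
 * sqes_per_sqb = 512 / 16 = 32.  A request for nb_desc = 512 then needs
 * 512 / 32 = 16 SQBs plus NIX_SQB_LIST_SPACE headroom, clamped between
 * NIX_DEF_SQB and roc_nix->max_sqb_count.
 */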

static void
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
             uint16_t smq)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_aq_enq_req *aq;

        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = sq->qid;
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_INIT;

        aq->sq.max_sqe_size = sq->max_sqe_sz;
        aq->sq.smq = smq;
        aq->sq.smq_rr_quantum = rr_quantum;
        aq->sq.default_chan = nix->tx_chan_base;
        aq->sq.sqe_stype = NIX_STYPE_STF;
        aq->sq.ena = 1;
        aq->sq.sso_ena = !!sq->sso_ena;
        if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
                aq->sq.sqe_stype = NIX_STYPE_STP;
        aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
        aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

        /* Many to one reduction */
        aq->sq.qint_idx = sq->qid % nix->qints;
}

static int
sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_aq_enq_rsp *rsp;
        struct nix_aq_enq_req *aq;
        uint16_t sqes_per_sqb;
        void *sqb_buf;
        int rc, count;

        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = sq->qid;
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_READ;
        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        /* Check if sq is already cleaned up */
        if (!rsp->sq.ena)
                return 0;

        /* Disable sq */
        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = sq->qid;
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_WRITE;
        aq->sq_mask.ena = ~aq->sq_mask.ena;
        aq->sq.ena = 0;
        rc = mbox_process(mbox);
        if (rc)
                return rc;

        /* Read SQ and free SQBs */
        aq = mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = sq->qid;
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_READ;
        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        if (rsp->sq.smq_pend)
                plt_err("SQ has pending SQEs");

        count = rsp->sq.sqb_count;
        sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
        /* Free SQBs that are in use */
        sqb_buf = (void *)rsp->sq.head_sqb;
        while (count) {
                void *next_sqb;

                next_sqb = *(void **)((uint64_t *)sqb_buf +
                                      (uint32_t)((sqes_per_sqb - 1) *
                                                 (0x2 >> sq->max_sqe_sz) * 8));
                roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
                sqb_buf = next_sqb;
                count--;
        }

        /* Free next to use SQB */
        if (rsp->sq.next_sqb)
                roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
        return 0;
}
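
/*
 * Layout note (sketch, inferred from the walk above): each SQB reserves
 * its last SQE slot for the pointer to the next SQB, so the chain is
 * followed by reading a void * at byte offset
 * (sqes_per_sqb - 1) * SQE size, where the SQE size is 128 B for W16
 * and 64 B for W8.  Every buffer visited is returned to the SQB aura,
 * ending with the not-yet-used next_sqb.
 */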

static void
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
        uint16_t smq)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_cn10k_aq_enq_req *aq;

        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->qidx = sq->qid;
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_INIT;

        aq->sq.max_sqe_size = sq->max_sqe_sz;
        aq->sq.smq = smq;
        aq->sq.smq_rr_weight = rr_quantum;
        aq->sq.default_chan = nix->tx_chan_base;
        aq->sq.sqe_stype = NIX_STYPE_STF;
        aq->sq.ena = 1;
        aq->sq.sso_ena = !!sq->sso_ena;
        if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
                aq->sq.sqe_stype = NIX_STYPE_STP;
        aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
        aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
        aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

        /* Many to one reduction */
        aq->sq.qint_idx = sq->qid % nix->qints;
}

static int
sq_fini(struct nix *nix, struct roc_nix_sq *sq)
{
        struct mbox *mbox = (&nix->dev)->mbox;
        struct nix_cn10k_aq_enq_rsp *rsp;
        struct nix_cn10k_aq_enq_req *aq;
        uint16_t sqes_per_sqb;
        void *sqb_buf;
        int rc, count;

        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->qidx = sq->qid;
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_READ;
        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        /* Check if sq is already cleaned up */
        if (!rsp->sq.ena)
                return 0;

        /* Disable sq */
        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->qidx = sq->qid;
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_WRITE;
        aq->sq_mask.ena = ~aq->sq_mask.ena;
        aq->sq.ena = 0;
        rc = mbox_process(mbox);
        if (rc)
                return rc;

        /* Read SQ and free SQBs */
        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
        aq->qidx = sq->qid;
        aq->ctype = NIX_AQ_CTYPE_SQ;
        aq->op = NIX_AQ_INSTOP_READ;
        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        if (rsp->sq.smq_pend)
                plt_err("SQ has pending SQEs");

        count = rsp->sq.sqb_count;
        sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
        /* Free SQBs that are in use */
        sqb_buf = (void *)rsp->sq.head_sqb;
        while (count) {
                void *next_sqb;

                next_sqb = *(void **)((uint64_t *)sqb_buf +
                                      (uint32_t)((sqes_per_sqb - 1) *
                                                 (0x2 >> sq->max_sqe_sz) * 8));
                roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
                sqb_buf = next_sqb;
                count--;
        }

        /* Free next to use SQB */
        if (rsp->sq.next_sqb)
                roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
        return 0;
}

int
roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
        struct mbox *mbox = (&nix->dev)->mbox;
        uint16_t qid, smq = UINT16_MAX;
        uint32_t rr_quantum = 0;
        int rc;

        if (sq == NULL)
                return NIX_ERR_PARAM;

        qid = sq->qid;
        if (qid >= nix->nb_tx_queues)
                return NIX_ERR_QUEUE_INVALID_RANGE;

        sq->roc_nix = roc_nix;
        /*
         * Allocate memory for flow control updates from HW.
         * Alloc one cache line, so that it fits all FC_STYPE modes.
         */
        sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
        if (sq->fc == NULL) {
                rc = NIX_ERR_NO_MEM;
                goto fail;
        }

        rc = sqb_pool_populate(roc_nix, sq);
        if (rc)
                goto nomem;

        rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
        if (rc) {
                rc = NIX_ERR_TM_LEAF_NODE_GET;
                goto nomem;
        }

        /* Init SQ context */
        if (roc_model_is_cn9k())
                sq_cn9k_init(nix, sq, rr_quantum, smq);
        else
                sq_init(nix, sq, rr_quantum, smq);

        rc = mbox_process(mbox);
        if (rc)
                goto nomem;

        nix->sqs[qid] = sq;
        sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
        /* Evenly distribute LMT slot for each sq */
        if (roc_model_is_cn9k()) {
                /* Multiple cores/SQs can use same LMTLINE safely in CN9K */
                sq->lmt_addr = (void *)(nix->lmt_base +
                                        ((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
        }

        return rc;
nomem:
        plt_free(sq->fc);
fail:
        return rc;
}

int
roc_nix_sq_fini(struct roc_nix_sq *sq)
{
        struct nix *nix;
        struct mbox *mbox;
        struct ndc_sync_op *ndc_req;
        uint16_t qid;
        int rc = 0;

        if (sq == NULL)
                return NIX_ERR_PARAM;

        nix = roc_nix_to_nix_priv(sq->roc_nix);
        mbox = (&nix->dev)->mbox;

        qid = sq->qid;

        rc = nix_tm_sq_flush_pre(sq);

        /* Release SQ context */
        if (roc_model_is_cn9k())
                rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
        else
                rc |= sq_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);

        /* Sync NDC-NIX-TX for LF */
        ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
        if (ndc_req == NULL)
                return -ENOSPC;
        ndc_req->nix_lf_tx_sync = 1;
        if (mbox_process(mbox))
                rc |= NIX_ERR_NDC_SYNC;

        rc |= nix_tm_sq_flush_post(sq);
        rc |= roc_npa_pool_destroy(sq->aura_handle);
        plt_free(sq->fc);
        plt_free(sq->sqe_mem);
        nix->sqs[qid] = NULL;

        return rc;
}
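
/*
 * Usage sketch for the SQ API above (illustrative; error handling
 * trimmed).  The qid must be below the LF's Tx queue count and the TM
 * leaf for that queue must already exist, since roc_nix_sq_init() looks
 * up its rr_quantum/smq:
 *
 *      struct roc_nix_sq sq = {0};
 *
 *      sq.qid = 0;
 *      sq.max_sqe_sz = roc_nix_maxsqesz_w16;
 *      sq.nb_desc = 1024;
 *      roc_nix_sq_init(roc_nix, &sq);
 *
 *      roc_nix_sq_fini(&sq);  // flush, disable and free resources
 */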