bus/fslmc: remove the export for QBMAN version
dpdk.git: drivers/bus/fslmc/qbman/qbman_portal.c
/*-
 *   BSD LICENSE
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0
#define QBMAN_CINH_SWP_DQRR_ITR    0xa80
#define QBMAN_CINH_SWP_ITPR    0xf40

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);

/*************************/
/* SDQCR attribute codes */
/*************************/

/* We put these here because at least some of them are required by
 * qbman_swp_init().
 */
struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
static struct qb_attr_code code_eq_dca_idx;
#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);

/* We need to keep track of which SWP triggered a pull command,
 * so we keep an array of portals indexed by portal ID and use the
 * token field to find the proper portal.
 */
#define MAX_QBMAN_PORTALS  35
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-workaround boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        int ret;
        uint32_t eqcr_pi;
        struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        p->desc = *d;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq = 0;
        qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
        qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
        qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
        atomic_set(&p->vdq.busy, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.next_idx = 0;
        p->dqrr.valid_bit = QB_VALID_BIT;
        qman_version = p->desc.qman_version;
        if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
                /* DQRR is 4 deep, so the EQ DCA index is encoded in 2 bits */
                code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2);
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
                /* DQRR is 8 deep, so the EQ DCA index is encoded in 3 bits */
                code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3);
        }

        ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
        if (ret) {
                kfree(p);
                pr_err("qbman_swp_sys_init() failed %d\n", ret);
                return NULL;
        }
        /* SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from, or else the QMan HW will indicate an
         * error. The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
        eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & 0xF;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
        p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
                                                p->eqcr.ci, p->eqcr.pi);

        portal_idx_map[p->desc.idx] = p;
        return p;
}

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        qbman_swp_sys_finish(&p->sys);
        portal_idx_map[p->desc.idx] = NULL;
        kfree(p);
}
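
/* Editor's sketch (not part of the upstream driver): the typical
 * bring-up/tear-down cycle for a software portal using the
 * constructor/destructor above. The descriptor "d" is assumed to have
 * been populated by the caller via the qbman_swp_desc setters.
 */
static inline int qbman_example_portal_cycle(const struct qbman_swp_desc *d)
{
        struct qbman_swp *swp = qbman_swp_init(d);

        if (!swp)
                return -ENOMEM;
        /* ... enqueue/dequeue traffic through the portal here ... */
        qbman_swp_finish(swp);
        return 0;
}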

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
        return &p->desc;
}

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
        void *ret;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
        if (!ret)
                p->mc.check = swp_mc_can_submit;
#endif
        return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
{
        uint32_t *v = cmd;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
        /* TBD: "|=" is going to hurt performance. Need to move as many fields
         * out of word zero, and for those that remain, the "OR" needs to occur
         * at the caller side. This debug check helps to catch cases where the
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
        *v = cmd_verb | p->mc.valid_bit;
        qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        qbman_cena_invalidate_prefetch(&p->sys,
                                       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        /* Remove the valid-bit - command completed if the rest is non-zero */
        verb = ret[0] & ~QB_VALID_BIT;
        if (!verb)
                return NULL;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit ^= QB_VALID_BIT;
        return ret;
}
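
/* Editor's sketch: the start/submit/poll protocol implemented by the
 * three functions above, spelled out as a busy-wait loop. This is the
 * same shape as the qbman_swp_mc_complete() convenience wrapper used
 * later in this file; "cmd_verb" is whichever management verb the
 * caller is issuing (e.g. QBMAN_MC_ACQUIRE).
 */
static inline void *qbman_example_mc_roundtrip(struct qbman_swp *p,
                                               uint32_t cmd_verb)
{
        uint32_t *cmd = qbman_swp_mc_start(p);
        void *ret;

        if (!cmd)
                return NULL;
        /* A real caller encodes command attributes into "cmd" here */
        qbman_swp_mc_submit(p, cmd, cmd[0] | cmd_verb);
        do {
                ret = qbman_swp_mc_result(p);
        } while (!ret);
        return ret;
}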

/***********/
/* Enqueue */
/***********/

/* These should be const, eventually */
static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
/* The code_eq_dca_idx width depends on the QMan version, so it is set at
 * run time in qbman_swp_init().
 */
static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);

enum qbman_eq_cmd_e {
        /* No enqueue, primarily for plugging ORP gaps for dropped frames */
        qbman_eq_cmd_empty,
        /* DMA an enqueue response once complete */
        qbman_eq_cmd_respond,
        /* DMA an enqueue response only if the enqueue fails */
        qbman_eq_cmd_respond_reject
};

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_orp_en, cl, 0);
        qb_attr_code_encode(&code_eq_cmd, cl,
                            respond_success ? qbman_eq_cmd_respond :
                                              qbman_eq_cmd_respond_reject);
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
                           uint32_t opr_id, uint32_t seqnum, int incomplete)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_orp_en, cl, 1);
        qb_attr_code_encode(&code_eq_cmd, cl,
                            respond_success ? qbman_eq_cmd_respond :
                                              qbman_eq_cmd_respond_reject);
        qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
        qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
        qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
                                uint32_t seqnum)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_orp_en, cl, 1);
        qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
        qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
        qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
        qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
        qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
                                uint32_t seqnum)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_orp_en, cl, 1);
        qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
        qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
        qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
        qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
        qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
                                dma_addr_t storage_phys,
                                int stash)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
        qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_qd_en, cl, 0);
        qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
                          uint32_t qd_bin, uint32_t qd_prio)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_qd_en, cl, 1);
        qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
        qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
        qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
                           uint32_t dqrr_idx, int park)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
        if (enable) {
                qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
                qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
        }
}
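
/* Editor's sketch: composing an enqueue descriptor with the setters
 * above. This targets a plain frame queue, requests a response only on
 * rejection, and leaves ORP/QD/DCA disabled; "fqid" is a placeholder
 * for a real frame queue ID.
 */
static inline void qbman_example_build_eq_desc(struct qbman_eq_desc *d,
                                               uint32_t fqid)
{
        qbman_eq_desc_clear(d);
        /* respond_success=0: only DMA a response if the enqueue fails */
        qbman_eq_desc_set_no_orp(d, 0);
        qbman_eq_desc_set_fq(d, fqid);
}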

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | EQAR_VB(eqar);
        qbman_cena_write_complete_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci;
        uint8_t diff;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();
        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
        s->eqcr.pi++;
        s->eqcr.pi &= 0xF;
        s->eqcr.available--;
        if (!(s->eqcr.pi & 7))
                s->eqcr.pi_vb ^= QB_VALID_BIT;
        return 0;
}

int qbman_swp_fill_ring(struct qbman_swp *s,
                        const struct qbman_eq_desc *d,
                        const struct qbman_fd *fd,
                        __attribute__((unused)) uint8_t burst_index)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci;
        uint8_t diff;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return -EBUSY;
        }
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
        memcpy(&p[1], &cl[1], 7 * 4);
        memcpy(&p[8], fd, sizeof(struct qbman_fd));

        /* lwsync(); */
        p[0] = cl[0] | s->eqcr.pi_vb;

        s->eqcr.pi++;
        s->eqcr.pi &= 0xF;
        s->eqcr.available--;
        if (!(s->eqcr.pi & 7))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

int qbman_swp_flush_ring(struct qbman_swp *s)
{
        void *ptr = s->sys.addr_cena;

        dcbf((uint64_t)ptr);
        dcbf((uint64_t)ptr + 0x40);
        dcbf((uint64_t)ptr + 0x80);
        dcbf((uint64_t)ptr + 0xc0);
        dcbf((uint64_t)ptr + 0x100);
        dcbf((uint64_t)ptr + 0x140);
        dcbf((uint64_t)ptr + 0x180);
        dcbf((uint64_t)ptr + 0x1c0);

        return 0;
}

void qbman_sync(void)
{
        lwsync();
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
                      const struct qbman_fd *fd)
{
        if (s->sys.eqcr_mode == qman_eqcr_vb_array)
                return qbman_swp_enqueue_array_mode(s, d, fd);
        else    /* Use ring mode by default */
                return qbman_swp_enqueue_ring_mode(s, d, fd);
}
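
/* Editor's sketch: a simple busy-wait wrapper around qbman_swp_enqueue().
 * -EBUSY only means "no EQCR slot was available right now", so a caller
 * that must not drop the frame can retry; real code would bound the
 * retries or back off.
 */
static inline void qbman_example_enqueue_blocking(struct qbman_swp *s,
                                                  const struct qbman_eq_desc *d,
                                                  const struct qbman_fd *fd)
{
        while (qbman_swp_enqueue(s, d, fd) == -EBUSY)
                ;
}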

int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
                               const struct qbman_eq_desc *d,
                               const struct qbman_fd *fd,
                               int num_frames)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi;
        uint8_t diff;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                s->eqcr.available += diff;
                if (!diff)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
                eqcr_pi &= 0xF;
                /* Point to the next enqueue descriptor */
                cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        cl = qb_cl(d);
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                        QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                eqcr_pi &= 0xF;
                if (!(eqcr_pi & 7))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
                /* Point to the next enqueue descriptor */
                cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (uint64_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uint64_t *)(addr_cena +
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
                eqcr_pi++;
                eqcr_pi &= 0xF;
        }
        s->eqcr.pi = eqcr_pi;

        return num_enqueued;
}

/*************************/
/* Static (push) dequeue */
/*************************/

void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
        struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);

        QBMAN_BUG_ON(channel_idx > 15);
        *enabled = (int)qb_attr_code_decode(&code, &s->sdq);
}

void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
        uint16_t dqsrc;
        struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);

        QBMAN_BUG_ON(channel_idx > 15);
        qb_attr_code_encode(&code, &s->sdq, !!enable);
        /* Read back the complete source map. If no channels are enabled,
         * the SDQCR must be 0 or else QMan will assert errors.
         */
        dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
        if (dqsrc != 0)
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}
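
/* Editor's sketch: enabling static (push) dequeue from channel index 0
 * and later disabling it. Channel indices are per-portal (0..15), not
 * global channel IDs.
 */
static inline void qbman_example_push_toggle(struct qbman_swp *s)
{
        int enabled;

        qbman_swp_push_set(s, 0, 1);
        qbman_swp_push_get(s, 0, &enabled);
        /* enabled is now 1; traffic from that channel arrives via DQRR */
        qbman_swp_push_set(s, 0, 0);
}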

/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct qbman_result *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        uint32_t *cl = qb_cl(d);
        /* Squiggle the pointer 'storage' into the extra 2 words of the
         * descriptor (which aren't copied to the hw command)
         */
        *(void **)&cl[4] = storage;
        if (!storage) {
                qb_attr_code_encode(&code_pull_rls, cl, 0);
                return;
        }
        qb_attr_code_encode(&code_pull_rls, cl, 1);
        qb_attr_code_encode(&code_pull_stash, cl, !!stash);
        qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
}

void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
{
        uint32_t *cl = qb_cl(d);

        QBMAN_BUG_ON(!numframes || (numframes > 16));
        qb_attr_code_encode(&code_pull_numframes, cl,
                            (uint32_t)(numframes - 1));
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_pull_token, cl, token);
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_pull_dct, cl, 1);
        qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
        qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
}

void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
                            enum qbman_pull_type_e dct)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_pull_dct, cl, dct);
        qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
        qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
}

void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
                                 enum qbman_pull_type_e dct)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_pull_dct, cl, dct);
        qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
        qb_attr_code_encode(&code_pull_dqsource, cl, chid);
}

int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }
        s->vdq.storage = *(void **)&cl[4];
        /* We use portal index +1 as token so that 0 still indicates
         * that the result isn't valid yet.
         */
        qb_attr_code_encode(&code_pull_token, cl, s->desc.idx + 1);
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        memcpy(&p[1], &cl[1], 12);
        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        return 0;
}
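
/* Editor's sketch: issuing a volatile (pull) dequeue of up to 4 frames
 * from a frame queue into caller-provided storage. "storage" must stay
 * valid until the command completes (see qbman_check_command_complete()
 * below); storage_phys is its DMA address. qbman_swp_pull() overwrites
 * the token itself, so no set_token call is needed here.
 */
static inline int qbman_example_pull_fq(struct qbman_swp *s, uint32_t fqid,
                                        struct qbman_result *storage,
                                        dma_addr_t storage_phys)
{
        struct qbman_pull_desc pd;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
        qbman_pull_desc_set_numframes(&pd, 4);
        qbman_pull_desc_set_fq(&pd, fqid);
        return qbman_swp_pull(s, &pd);
}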

/****************/
/* Polling DQRR */
/****************/

static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);

#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *dq;
        const uint32_t *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (unlikely(s->dqrr.reset_bug)) {
                /* We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
                uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;
                /* if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
                        pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                qbman_cena_invalidate_prefetch(&s->sys,
                                QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        }
        dq = qbman_cena_read_wo_shadow(&s->sys,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        p = qb_cl(dq);
        verb = qb_attr_code_decode(&code_dqrr_verb, p);
        /* If the valid-bit isn't of the expected polarity, nothing there. Note,
         * in the DQRR reset bug workaround, we shouldn't need to skip this
         * check, because we've already determined that a new entry is available
         * and we've invalidated the cacheline before reading it, so the
         * valid-bit behaviour is repaired and should tell us what we already
         * knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command,
         * indicate that the vdq is no longer busy.
         */
        flags = qbman_result_DQ_flags(dq);
        response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & QBMAN_DQ_STAT_VOLATILE) &&
            (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);

        return dq;
}

/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
                            const struct qbman_result *dq)
{
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
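
/* Editor's sketch: a DQRR polling loop built from the two calls above.
 * Each entry returned by qbman_swp_dqrr_next() is handed back to
 * hardware with qbman_swp_dqrr_consume() once the frame descriptor has
 * been processed. qbman_result_is_DQ() and qbman_result_DQ_fd() are
 * defined later in this file (and declared in the public header).
 */
static inline void qbman_example_poll_dqrr(struct qbman_swp *s)
{
        const struct qbman_result *dq;

        while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
                if (qbman_result_is_DQ(dq)) {
                        const struct qbman_fd *fd = qbman_result_DQ_fd(dq);

                        (void)fd; /* process the frame here */
                }
                qbman_swp_dqrr_consume(s, dq);
        }
}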

/*********************************/
/* Polling user-provided storage */
/*********************************/

int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
                                const struct qbman_result *dq)
{
        /* To avoid converting the little-endian DQ entry to host-endian prior
         * to us knowing whether there is a valid entry or not (and run the
         * risk of corrupting the incoming hardware LE write), we detect in
         * hardware endianness rather than host. This means we need a different
         * "code" depending on whether we are BE or LE in software, which is
         * where DQRR_TOK_OFFSET comes in...
         */
        static struct qb_attr_code code_dqrr_tok_detect =
                                        QB_CODE(0, DQRR_TOK_OFFSET, 8);
        /* The user trying to poll for a result treats "dq" as const. It is
         * however the same address that was provided to us non-const in the
         * first place, for directing hardware DMA to. So we can cast away the
         * const because it is mutable from our perspective.
         */
        uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
        uint32_t token;

        token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
        if (token == 0)
                return 0;
        /* Entry is valid - overwrite token back to 0 so
         * a) If this memory is reused, the token will be 0
         * b) If someone calls "has_new_result()" again on this entry it
         *    will not appear to be new
         */
        qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);

        /* Only now do we convert from hardware to host endianness. Also, as we
         * are returning success, the user has promised not to call us again, so
         * there's no risk of us converting the endianness twice...
         */
        make_le32_n(p, 16);
        return 1;
}

int qbman_check_command_complete(struct qbman_swp *s,
                                 const struct qbman_result *dq)
{
        /* To avoid converting the little-endian DQ entry to host-endian prior
         * to us knowing whether there is a valid entry or not (and run the
         * risk of corrupting the incoming hardware LE write), we detect in
         * hardware endianness rather than host. This means we need a different
         * "code" depending on whether we are BE or LE in software, which is
         * where DQRR_TOK_OFFSET comes in...
         */
        static struct qb_attr_code code_dqrr_tok_detect =
                                        QB_CODE(0, DQRR_TOK_OFFSET, 8);
        /* The user trying to poll for a result treats "dq" as const. It is
         * however the same address that was provided to us non-const in the
         * first place, for directing hardware DMA to. So we can cast away the
         * const because it is mutable from our perspective.
         */
        uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
        uint32_t token;

        token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
        if (token == 0)
                return 0;
        /* TODO: Remove qbman_swp from parameters and make it a local
         * once we've tested the reserve portal map change
         */
        s = portal_idx_map[token - 1];
        /* When the token is set, it indicates that the VDQ command has been
         * fetched by QBMan, which is now working on it. It is then safe for
         * software to issue another VDQ command, so we increment the busy
         * variable.
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.busy);
        }
        return 1;
}
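
/* Editor's sketch: draining the storage filled by an earlier volatile
 * dequeue (such as the pull sketch above). Each entry is polled until
 * hardware has written it; the entry carrying QBMAN_DQ_STAT_EXPIRED is
 * the final response for the command. This is a simplified loop; real
 * callers should also bound the spin.
 */
static inline void qbman_example_drain_storage(struct qbman_swp *s,
                                               struct qbman_result *storage)
{
        const struct qbman_result *dq = storage;
        uint32_t flags;

        do {
                /* Spin until hardware has written this entry */
                while (!qbman_check_command_complete(s, dq))
                        ;
                if (qbman_result_is_DQ(dq)) {
                        /* process qbman_result_DQ_fd(dq) here */
                }
                flags = qbman_result_DQ_flags(dq);
                dq++;
        } while (!(flags & QBMAN_DQ_STAT_EXPIRED));
}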

/********************************/
/* Categorising qbman results   */
/********************************/

static struct qb_attr_code code_result_in_mem =
                        QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);

static inline int __qbman_result_is_x(const struct qbman_result *dq,
                                      uint32_t x)
{
        const uint32_t *p = qb_cl(dq);
        uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);

        return (response_verb == x);
}

static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
                                             uint32_t x)
{
        const uint32_t *p = qb_cl(dq);
        uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);

        return (response_verb == x);
}

int qbman_result_is_DQ(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}

int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}

int qbman_result_is_CDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}

int qbman_result_is_CSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
                __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}

int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
}

int qbman_result_is_CGCU(const struct qbman_result *dq)
{
        return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
}

int qbman_result_is_FQRN(const struct qbman_result *dq)
{
        return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
}

int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
        return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
}

int qbman_result_is_FQPN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}

/*********************************/
/* Parsing frame dequeue results */
/*********************************/

/* These APIs assume qbman_result_is_DQ() is TRUE */

uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
        const uint32_t *p = qb_cl(dq);

        return qb_attr_code_decode(&code_dqrr_stat, p);
}

uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
        const uint32_t *p = qb_cl(dq);

        return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
}

uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
        const uint32_t *p = qb_cl(dq);

        return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
}

uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
        const uint32_t *p = qb_cl(dq);

        return qb_attr_code_decode(&code_dqrr_fqid, p);
}

uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
        const uint32_t *p = qb_cl(dq);

        return qb_attr_code_decode(&code_dqrr_byte_count, p);
}

uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
        const uint32_t *p = qb_cl(dq);

        return qb_attr_code_decode(&code_dqrr_frame_count, p);
}

uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
        const uint64_t *p = (const uint64_t *)qb_cl(dq);

        return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
}

const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
        const uint32_t *p = qb_cl(dq);

        return (const struct qbman_fd *)&p[8];
}
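
/* Editor's sketch: pulling the common fields out of a frame-dequeue
 * result, guarded by qbman_result_is_DQ() as the comment above
 * requires. Returns NULL for non-DQ entries.
 */
static inline const struct qbman_fd *
qbman_example_parse_dq(const struct qbman_result *dq, uint32_t *fqid)
{
        if (!qbman_result_is_DQ(dq))
                return NULL;
        if (fqid)
                *fqid = qbman_result_DQ_fqid(dq);
        return qbman_result_DQ_fd(dq);
}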

/**************************************/
/* Parsing state-change notifications */
/**************************************/

static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
static struct qb_attr_code code_scn_state_in_mem =
                        QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
static struct qb_attr_code code_scn_rid_in_mem =
                        QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);

uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
        const uint32_t *p = qb_cl(scn);

        return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
}

uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
        const uint32_t *p = qb_cl(scn);

        return qb_attr_code_decode(&code_scn_rid, p);
}

uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
        const uint64_t *p = (const uint64_t *)qb_cl(scn);

        return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
}

uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
{
        const uint32_t *p = qb_cl(scn);

        return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
}

uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
{
        const uint32_t *p = qb_cl(scn);
        uint32_t result_rid;

        result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
        return make_le24(result_rid);
}

/*****************/
/* Parsing BPSCN */
/*****************/
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
        return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
}

int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
        return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
}

int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
        return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
}

int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
        return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
}

uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
        uint64_t ctx;
        uint32_t ctx_hi, ctx_lo;

        ctx = qbman_result_SCN_ctx(scn);
        ctx_hi = upper32(ctx);
        ctx_lo = lower32(ctx);
        return ((uint64_t)make_le32(ctx_hi) << 32 |
                (uint64_t)make_le32(ctx_lo));
}

/*****************/
/* Parsing CGCU  */
/*****************/
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
        return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
}

uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
        uint64_t ctx;
        uint32_t ctx_hi, ctx_lo;

        ctx = qbman_result_SCN_ctx(scn);
        ctx_hi = upper32(ctx);
        ctx_lo = lower32(ctx);
        return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
                (uint64_t)make_le32(ctx_lo);
}

/******************/
/* Buffer release */
/******************/

/* These should be const, eventually */
/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);

void qbman_release_desc_clear(struct qbman_release_desc *d)
{
        uint32_t *cl;

        memset(d, 0, sizeof(*d));
        cl = qb_cl(d);
        qb_attr_code_encode(&code_release_set_me, cl, 1);
}

void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_release_bpid, cl, bpid);
}

void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
        uint32_t *cl = qb_cl(d);

        qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
                      const uint64_t *buffers, unsigned int num_buffers)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

        pr_debug("RAR=%08x\n", rar);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;
        QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
        /* Start the release command */
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                                             QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
        /* Copy the caller's buffer pointers to the command */
        u64_to_le32_copy(&p[2], buffers, num_buffers);
        /* Set the verb byte, have to substitute in the valid-bit and the number
         * of buffers.
         */
        lwsync();
        p[0] = cl[0] | RAR_VB(rar) | num_buffers;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                            QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
        return 0;
}

/*******************/
/* Buffer acquires */
/*******************/

/* These should be const, eventually */
static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);

int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
                      unsigned int num_buffers)
{
        uint32_t *p;
        uint32_t rslt, num;

        QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

        /* Start the management command */
        p = qbman_swp_mc_start(s);

        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        qb_attr_code_encode(&code_acquire_bpid, p, bpid);
        qb_attr_code_encode(&code_acquire_num, p, num_buffers);

        /* Complete the management command */
        p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);

        /* Decode the outcome */
        rslt = qb_attr_code_decode(&code_generic_rslt, p);
        num = qb_attr_code_decode(&code_acquire_r_num, p);
        QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) !=
                     QBMAN_MC_ACQUIRE);

        /* Determine success or failure */
        if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
                       bpid, rslt);
                return -EIO;
        }
        QBMAN_BUG_ON(num > num_buffers);
        /* Copy the acquired buffers to the caller's array */
        u64_from_le32_copy(buffers, &p[2], num);
        return (int)num;
}
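
/* Editor's sketch: seeding a buffer pool and pulling buffers back out
 * with the release/acquire pair above. Both directions move at most 7
 * buffer addresses per command; "bpid" is a placeholder buffer pool ID
 * and the zeroed addresses stand in for real DMA addresses.
 */
static inline int qbman_example_pool_roundtrip(struct qbman_swp *s,
                                               uint32_t bpid)
{
        uint64_t bufs[7] = { 0 }; /* physical buffer addresses go here */
        struct qbman_release_desc rd;
        int ret;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);
        ret = qbman_swp_release(s, &rd, bufs, 7);
        if (ret)
                return ret;
        /* Returns the number of buffers actually acquired (0..7) */
        return qbman_swp_acquire(s, bpid, bufs, 7);
}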

/*****************/
/* FQ management */
/*****************/

static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);

static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
                                  uint8_t alt_fq_verb)
{
        uint32_t *p;
        uint32_t rslt;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
        /* Complete the management command */
        p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);

        /* Decode the outcome */
        rslt = qb_attr_code_decode(&code_generic_rslt, p);
        QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);

        /* Determine success or failure */
        if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
                       fqid, alt_fq_verb, rslt);
                return -EIO;
        }

        return 0;
}

int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/**********************/
/* Channel management */
/**********************/

static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
                              uint8_t we_mask, uint8_t cdan_en,
                              uint64_t ctx)
{
        uint32_t *p;
        uint32_t rslt;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        qb_attr_code_encode(&code_cdan_cid, p, channelid);
        qb_attr_code_encode(&code_cdan_we, p, we_mask);
        qb_attr_code_encode(&code_cdan_en, p, cdan_en);
        qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
        /* Complete the management command */
        p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);

        /* Decode the outcome */
        rslt = qb_attr_code_decode(&code_generic_rslt, p);
        QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
                                        != QBMAN_WQCHAN_CONFIGURE);

        /* Determine success or failure */
        if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("CDAN cQID %d failed: code = 0x%02x\n",
                       channelid, rslt);
                return -EIO;
        }

        return 0;
}

int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
                               uint64_t ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_CTX,
                                  0, ctx);
}

int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  1, 0);
}

int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  0, 0);
}

int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
                                      uint64_t ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                                  1, ctx);
}
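
/* Editor's sketch: arming a channel data availability notification
 * (CDAN) with a user context, using the wrappers above. The context
 * value comes back via qbman_result_SCN_ctx() when the notification
 * fires; 0x1234 is a placeholder.
 */
static inline int qbman_example_arm_cdan(struct qbman_swp *s,
                                         uint16_t channelid)
{
        return qbman_swp_CDAN_set_context_enable(s, channelid, 0x1234);
}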

uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
        return QBMAN_IDX_FROM_DQRR(dqrr);
}

struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
{
        struct qbman_result *dq;

        dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
        return dq;
}

int qbman_swp_send_multiple(struct qbman_swp *s,
                            const struct qbman_eq_desc *d,
                            const struct qbman_fd *fd,
                            int frames_to_send)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci;
        uint8_t diff;
        int sent = 0;
        int i;
        int initial_pi = s->eqcr.pi;
        uint64_t start_pointer;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                 QBMAN_CENA_SWP_EQCR_CI) & 0xF;
                diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
                                   eqcr_ci, s->eqcr.ci);
                if (!diff)
                        goto done;
                s->eqcr.available += diff;
        }

        /* We try to send frames_to_send frames, as long as there is
         * enough space in the ring.
         */
        while (s->eqcr.available && frames_to_send--) {
                p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
                                        QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
                /* Write the command (except the first byte) and the FD */
                memcpy(&p[1], &cl[1], 7 * 4);
                memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));

                initial_pi++;
                initial_pi &= 0xF;
                s->eqcr.available--;
                sent++;
        }

done:
        initial_pi = s->eqcr.pi;
        lwsync();

        /* In order for the flushes to complete faster, write the verb byte
         * (a single 32-bit word per entry) for all recorded entries first,
         * then flush the cache lines together below.
         */

        initial_pi = s->eqcr.pi;
        for (i = 0; i < sent; i++) {
                p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
                                        QBMAN_CENA_SWP_EQCR((initial_pi) & 7));

                p[0] = cl[0] | s->eqcr.pi_vb;
                initial_pi++;
                initial_pi &= 0xF;

                if (!(initial_pi & 7))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        initial_pi = s->eqcr.pi;

        /* We need to flush all the lines, but without load/store
         * operations between them. We assign start_pointer before the
         * loop so that inside the loop we do not re-read it from memory.
         */
        start_pointer = (uint64_t)s->sys.addr_cena;
        for (i = 0; i < sent; i++) {
                p = (uint32_t *)(start_pointer
                                 + QBMAN_CENA_SWP_EQCR(initial_pi & 7));
                dcbf((uint64_t)p);
                initial_pi++;
                initial_pi &= 0xF;
        }

        /* Update producer index for the next call */
        s->eqcr.pi = initial_pi;

        return sent;
}