bus/fslmc: support enqueue with multiple descriptors
[dpdk.git] / drivers / bus / fslmc / qbman / qbman_portal.c
1 /*-
2  *   BSD LICENSE
3  *
4  * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *     * Redistributions of source code must retain the above copyright
9  *       notice, this list of conditions and the following disclaimer.
10  *     * Redistributions in binary form must reproduce the above copyright
11  *       notice, this list of conditions and the following disclaimer in the
12  *       documentation and/or other materials provided with the distribution.
13  *     * Neither the name of Freescale Semiconductor nor the
14  *       names of its contributors may be used to endorse or promote products
15  *       derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
21  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28
29 #include "qbman_portal.h"
30
31 /* QBMan portal management command codes */
32 #define QBMAN_MC_ACQUIRE       0x30
33 #define QBMAN_WQCHAN_CONFIGURE 0x46
34
35 /* CINH register offsets */
36 #define QBMAN_CINH_SWP_EQCR_PI 0x800
37 #define QBMAN_CINH_SWP_EQCR_CI 0x840
38 #define QBMAN_CINH_SWP_EQAR    0x8c0
39 #define QBMAN_CINH_SWP_DQPI    0xa00
40 #define QBMAN_CINH_SWP_DCAP    0xac0
41 #define QBMAN_CINH_SWP_SDQCR   0xb00
42 #define QBMAN_CINH_SWP_RAR     0xcc0
43 #define QBMAN_CINH_SWP_ISR     0xe00
44 #define QBMAN_CINH_SWP_IER     0xe40
45 #define QBMAN_CINH_SWP_ISDR    0xe80
46 #define QBMAN_CINH_SWP_IIR     0xec0
47
48 /* CENA register offsets */
49 #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
50 #define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
51 #define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((uint32_t)(n) << 6))
52 #define QBMAN_CENA_SWP_CR      0x600
53 #define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((uint32_t)(vb) >> 1))
54 #define QBMAN_CENA_SWP_VDQCR   0x780
55 #define QBMAN_CENA_SWP_EQCR_CI 0x840
56
57 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
58 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
59
60 /* QBMan FQ management command codes */
61 #define QBMAN_FQ_SCHEDULE       0x48
62 #define QBMAN_FQ_FORCE          0x49
63 #define QBMAN_FQ_XON            0x4d
64 #define QBMAN_FQ_XOFF           0x4e
65
66 /*******************************/
67 /* Pre-defined attribute codes */
68 /*******************************/
69
70 struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
71 struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
72
73 /*************************/
74 /* SDQCR attribute codes */
75 /*************************/
76
77 /* we put these here because at least some of them are required by
78  * qbman_swp_init()
79  */
80 struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
81 struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
82 struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
83 static struct qb_attr_code code_eq_dca_idx;
84 #define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
85 enum qbman_sdqcr_dct {
86         qbman_sdqcr_dct_null = 0,
87         qbman_sdqcr_dct_prio_ics,
88         qbman_sdqcr_dct_active_ics,
89         qbman_sdqcr_dct_active
90 };
91
92 enum qbman_sdqcr_fc {
93         qbman_sdqcr_fc_one = 0,
94         qbman_sdqcr_fc_up_to_3 = 1
95 };
96
97 struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);
98
99 /* We need to keep track of which SWP triggered a pull command
100  * so keep an array of portal IDs and use the token field to
101  * be able to find the proper portal
102  */
103 #define MAX_QBMAN_PORTALS  35
104 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
105
106 uint32_t qman_version;
107
108 /*********************************/
109 /* Portal constructor/destructor */
110 /*********************************/
111
112 /* Software portals should always be in the power-on state when we initialise,
113  * due to the CCSR-based portal reset functionality that MC has.
114  *
115  * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
116  * valid-bits, so we need to support a workaround where we don't trust
117  * valid-bits when detecting new entries until any stale ring entries have been
118  * overwritten at least once. The idea is that we read PI for the first few
119  * entries, then switch to valid-bit after that. The trick is to clear the
120  * bug-work-around boolean once the PI wraps around the ring for the first time.
121  *
122  * Note: this still carries a slight additional cost once the decrementer hits
123  * zero.
124  */
/* Create a software-portal object from the given descriptor.
 *
 * Syncs s/w state with h/w by reading the EQCR producer/consumer
 * indices from the cache-inhibited registers, applies the pre-4.1 DQRR
 * reset workaround described above, and registers the portal in
 * portal_idx_map[] so that pull-dequeue tokens can be mapped back to
 * the issuing portal.  Returns NULL on allocation or sys-init failure.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	int ret;
	uint32_t eqcr_pi;
	struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	p->desc = *d;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	/* Pre-compute the SDQCR word; it is only written to h/w once a
	 * channel is actually enabled (see qbman_swp_push_set()).
	 */
	qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
	qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
	qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
	atomic_set(&p->vdq.busy, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;
	qman_version = p->desc.qman_version;
	/* QMan revisions before 4.1 have a 4-entry DQRR and need the
	 * reset-bug workaround; the DCA index field width in the enqueue
	 * descriptor also depends on the DQRR size, so set it here.
	 */
	if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
		/* Set size of DQRR to 4, encoded in 2 bits */
		code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2);
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
		/* Set size of DQRR to 8, encoded in 3 bits */
		code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3);
	}

	ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
	if (ret) {
		kfree(p);
		pr_err("qbman_swp_sys_init() failed %d\n", ret);
		return NULL;
	}
	/* SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled
	 */
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
	eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & 0xF;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
	/* Free EQCR slots = ring size minus entries between ci and pi */
	p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
						p->eqcr.ci, p->eqcr.pi);

	portal_idx_map[p->desc.idx] = p;
	return p;
}
181
/* Tear down a software portal created by qbman_swp_init(): release the
 * sys layer, remove the portal from the token lookup map, and free it.
 * With QBMAN_CHECKING, asserts no management command is in flight.
 */
void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	qbman_swp_sys_finish(&p->sys);
	portal_idx_map[p->desc.idx] = NULL;
	kfree(p);
}

/* Return the descriptor the portal was initialised with. */
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
	return &p->desc;
}
196
197 /**************/
198 /* Interrupts */
199 /**************/
200
/* Thin accessors for the portal interrupt registers (ISDR/ISR/IER/IIR).
 * Each is a single cache-inhibited register read or write.
 */

/* Read the interrupt status-disable ("vanish") register. */
uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

/* Write the interrupt status-disable ("vanish") register. */
void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

/* Read the interrupt status register. */
uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

/* Write-to-clear the given bits in the interrupt status register. */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

/* Read the interrupt enable register. */
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

/* Write the interrupt enable register. */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

/* Read the interrupt inhibit register. */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

/* Inhibit (all-ones) or un-inhibit (zero) portal interrupts. */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
240
241 /***********************/
242 /* Management commands */
243 /***********************/
244
245 /*
246  * Internal code common to all types of management commands.
247  */
248
/* Begin a management command.  Returns a pointer into the CR cacheline
 * for the caller to fill in before qbman_swp_mc_submit(), or NULL if
 * the command region could not be obtained.  With QBMAN_CHECKING, the
 * portal's command state machine advances start -> submit on success.
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	void *ret;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
	if (!ret)
		p->mc.check = swp_mc_can_submit;
#endif
	return ret;
}
262
263 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
264 {
265         uint32_t *v = cmd;
266 #ifdef QBMAN_CHECKING
267         QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
268 #endif
269         /* TBD: "|=" is going to hurt performance. Need to move as many fields
270          * out of word zero, and for those that remain, the "OR" needs to occur
271          * at the caller side. This debug check helps to catch cases where the
272          * caller wants to OR but has forgotten to do so.
273          */
274         QBMAN_BUG_ON((*v & cmd_verb) != *v);
275         *v = cmd_verb | p->mc.valid_bit;
276         qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
277 #ifdef QBMAN_CHECKING
278         p->mc.check = swp_mc_can_poll;
279 #endif
280 }
281
/* Poll for the result of the last submitted management command.
 * Returns NULL while the command is still in flight (response verb is
 * zero once the valid-bit is masked off); on completion, toggles the
 * valid-bit for the next command and returns the response cacheline.
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
	qbman_cena_invalidate_prefetch(&p->sys,
				       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
	ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
	/* Remove the valid-bit - command completed if the rest is non-zero */
	verb = ret[0] & ~QB_VALID_BIT;
	if (!verb)
		return NULL;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit ^= QB_VALID_BIT;
	return ret;
}
301
302 /***********/
303 /* Enqueue */
304 /***********/
305
306 /* These should be const, eventually */
307 static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
308 static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
309 static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
310 static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
311 /* Can't set code_eq_dca_idx width. Need qman version. Read at runtime */
312 static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
313 static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
314 static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
315 static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
316 static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
317 static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
318 /* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
319 static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
320 static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
321 static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
322 static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
323 static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
324 static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
325
326 enum qbman_eq_cmd_e {
327         /* No enqueue, primarily for plugging ORP gaps for dropped frames */
328         qbman_eq_cmd_empty,
329         /* DMA an enqueue response once complete */
330         qbman_eq_cmd_respond,
331         /* DMA an enqueue response only if the enqueue fails */
332         qbman_eq_cmd_respond_reject
333 };
334
/* Reset an enqueue descriptor to all-zero (no options selected). */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/* Configure the descriptor for a plain (non-order-restored) enqueue.
 * respond_success selects whether a DMA'd response is produced on every
 * enqueue or only on rejection.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 0);
	qb_attr_code_encode(&code_eq_cmd, cl,
			    respond_success ? qbman_eq_cmd_respond :
					      qbman_eq_cmd_respond_reject);
}

/* Configure the descriptor for an order-restored enqueue on order-point
 * opr_id with the given sequence number; 'incomplete' sets the NLIS
 * (not-last-in-sequence) flag.
 */
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint32_t opr_id, uint32_t seqnum, int incomplete)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 1);
	qb_attr_code_encode(&code_eq_cmd, cl,
			    respond_success ? qbman_eq_cmd_respond :
					      qbman_eq_cmd_respond_reject);
	qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
	qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
	qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
}

/* Configure an "empty" ORP command (no actual enqueue) to plug a gap in
 * the sequence for a dropped frame, with NESN cleared.
 */
void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
				uint32_t seqnum)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 1);
	qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
	qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
	qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
	qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
	qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
}

/* Same as qbman_eq_desc_set_orp_hole() but with the NESN (advance next
 * expected sequence number) flag set.
 */
void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
				uint32_t seqnum)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 1);
	qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
	qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
	qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
	qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
	qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
}
389
/* Set the DMA address (and optional stashing) for enqueue responses. */
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				dma_addr_t storage_phys,
				int stash)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
	qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
}

/* Set the token echoed back in the enqueue response, so the caller can
 * match responses to requests.
 */
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
}

/* Target a specific frame queue: disables QD mode and records the FQID
 * as the target id.
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_qd_en, cl, 0);
	qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
}

/* Target a queuing destination: enables QD mode and records the QD id,
 * bin and priority.
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint32_t qd_bin, uint32_t qd_prio)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_qd_en, cl, 1);
	qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
	qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
	qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
}

/* Enable/disable the EQDI (enqueue-dequeue-interrupt) bit. */
void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
}

/* Configure DCA (discrete consumption acknowledgement): when enabled,
 * the enqueue also consumes DQRR entry dqrr_idx, optionally parking the
 * FQ.  Note code_eq_dca_idx's width is set at qbman_swp_init() time
 * based on the detected DQRR size.
 */
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
			   uint32_t dqrr_idx, int park)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
	if (enable) {
		qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
		qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
	}
}
444
445 #define EQAR_IDX(eqar)     ((eqar) & 0x7)
446 #define EQAR_VB(eqar)      ((eqar) & 0x80)
447 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
/* Enqueue one frame with the EQCR operating in array mode.  Reads the
 * EQAR register to claim a free array entry; returns -EBUSY when none
 * is available.  The verb word (p[0]) is written last, after lwsync(),
 * so h/w only sees a complete entry once the valid-bit from EQAR is
 * substituted in.
 */
static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
				 const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	/* Words 1..7 = descriptor body, words 8.. = the frame descriptor */
	word_copy(&p[1], &cl[1], 7);
	word_copy(&p[8], fd, sizeof(*fd) >> 2);
	/* Set the verb byte, have to substitute in the valid-bit */
	lwsync();
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	return 0;
}
470
/* Enqueue one frame with the EQCR operating in ring mode.  When the
 * cached 'available' count is exhausted, re-reads the consumer index to
 * reclaim entries h/w has consumed; returns -EBUSY if the ring is still
 * full.  The verb word is written last, after lwsync(), with the
 * software-tracked valid-bit that toggles on each 8-entry wrap.
 */
static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci;
	uint8_t diff;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
		QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
	word_copy(&p[1], &cl[1], 7);
	word_copy(&p[8], fd, sizeof(*fd) >> 2);
	lwsync();
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
		QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
	/* pi counts modulo 16 while the ring has 8 slots, so (pi & 7)
	 * indexes the slot and the valid-bit flips on every wrap.
	 */
	s->eqcr.pi++;
	s->eqcr.pi &= 0xF;
	s->eqcr.available--;
	if (!(s->eqcr.pi & 7))
		s->eqcr.pi_vb ^= QB_VALID_BIT;
	return 0;
}
507
/* Stage one enqueue entry in the EQCR without any memory barrier or
 * completion write (note the commented-out lwsync()/write_complete):
 * presumably the caller batches several fills and then makes them
 * visible with qbman_sync() + qbman_swp_flush_ring() -- TODO confirm
 * against callers.  burst_index is currently unused (see the
 * commented-out "+burst_index" in the slot computation).
 * Returns -EBUSY when the ring is full even after refreshing the
 * consumer index, 0 otherwise.
 */
int qbman_swp_fill_ring(struct qbman_swp *s,
			const struct qbman_eq_desc *d,
			const struct qbman_fd *fd,
			__attribute__((unused)) uint8_t burst_index)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci;
	uint8_t diff;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return -EBUSY;
	}
	p = qbman_cena_write_start_wo_shadow(&s->sys,
		QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
	/* word_copy(&p[1], &cl[1], 7); */
	memcpy(&p[1], &cl[1], 7 * 4);
	/* word_copy(&p[8], fd, sizeof(*fd) >> 2); */
	memcpy(&p[8], fd, sizeof(struct qbman_fd));

	/* lwsync(); */
	p[0] = cl[0] | s->eqcr.pi_vb;

	s->eqcr.pi++;
	s->eqcr.pi &= 0xF;
	s->eqcr.available--;
	if (!(s->eqcr.pi & 7))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}
546
547 int qbman_swp_flush_ring(struct qbman_swp *s)
548 {
549         void *ptr = s->sys.addr_cena;
550
551         dcbf((uint64_t)ptr);
552         dcbf((uint64_t)ptr + 0x40);
553         dcbf((uint64_t)ptr + 0x80);
554         dcbf((uint64_t)ptr + 0xc0);
555         dcbf((uint64_t)ptr + 0x100);
556         dcbf((uint64_t)ptr + 0x140);
557         dcbf((uint64_t)ptr + 0x180);
558         dcbf((uint64_t)ptr + 0x1c0);
559
560         return 0;
561 }
562
/* Lightweight store barrier wrapper, for callers staging EQCR entries
 * via qbman_swp_fill_ring() (which omits its own lwsync()).
 */
void qbman_sync(void)
{
	lwsync();
}
567
568 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
569                       const struct qbman_fd *fd)
570 {
571         if (s->sys.eqcr_mode == qman_eqcr_vb_array)
572                 return qbman_swp_enqueue_array_mode(s, d, fd);
573         else    /* Use ring mode by default */
574                 return qbman_swp_enqueue_ring_mode(s, d, fd);
575 }
576
/* Enqueue up to num_frames frames, each with its own enqueue descriptor
 * (d points to an array of descriptors, one per frame, walked in step
 * with fd[]).  Three passes over the claimed ring slots: (1) copy
 * descriptor bodies and FDs, (2) after one lwsync(), write all verb
 * words with the valid-bit, (3) flush the touched cachelines back to
 * back.  Returns the number of frames actually enqueued, which may be
 * less than num_frames if the EQCR has fewer free entries (0 if full).
 */
int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct qbman_fd *fd,
			       int num_frames)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi;
	uint8_t diff;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	if (!s->eqcr.available) {
		/* Refresh the cached consumer index to reclaim entries
		 * h/w has consumed since the last check.
		 */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				   eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;
		if (!diff)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
		/* Words 1..7 of the descriptor; the verb word 0 is
		 * deferred until after the barrier below.
		 */
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
		eqcr_pi &= 0xF;
		/*Pointing to the next enqueue descriptor*/
		cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	cl = qb_cl(d);
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
					QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		eqcr_pi &= 0xF;
		/* Valid-bit flips each time the 8-entry ring wraps */
		if (!(eqcr_pi & 7))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
		/*Pointing to the next enqueue descriptor*/
		cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (uint64_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uint64_t *)(addr_cena +
				QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
		eqcr_pi++;
		eqcr_pi &= 0xF;
	}
	s->eqcr.pi = eqcr_pi;

	return num_enqueued;
}
646
647 /*************************/
648 /* Static (push) dequeue */
649 /*************************/
650
/* Report whether static dequeue is enabled for the given channel index
 * (0..15) in the portal's cached SDQCR word.
 */
void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
	struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);

	QBMAN_BUG_ON(channel_idx > 15);
	*enabled = (int)qb_attr_code_decode(&code, &s->sdq);
}

/* Enable/disable static dequeue for the given channel index (0..15),
 * then write the updated SDQCR to h/w -- or 0 when no channels remain
 * enabled, since a non-zero SDQCR with an empty source map is an error.
 */
void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
	uint16_t dqsrc;
	struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);

	QBMAN_BUG_ON(channel_idx > 15);
	qb_attr_code_encode(&code, &s->sdq, !!enable);
	/* Read make the complete src map.  If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
	if (dqsrc != 0)
		qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}
675
676 /***************************/
677 /* Volatile (pull) dequeue */
678 /***************************/
679
680 /* These should be const, eventually */
681 static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
682 static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
683 static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
684 static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
685 static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
686 static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
687 static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
688 static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
689
690 enum qb_pull_dt_e {
691         qb_pull_dt_channel,
692         qb_pull_dt_workqueue,
693         qb_pull_dt_framequeue
694 };
695
/* Reset a pull-dequeue descriptor to all-zero (no options selected). */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/* Set where dequeue results are delivered.  A NULL 'storage' selects
 * DQRR delivery (RLS cleared); otherwise results are DMA'd to
 * storage_phys with optional stashing.  The virtual pointer is stashed
 * in the descriptor's two spare words (not copied to the h/w command)
 * so qbman_swp_pull() can recover it.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct qbman_result *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	uint32_t *cl = qb_cl(d);
	/* Squiggle the pointer 'storage' into the extra 2 words of the
	 * descriptor (which aren't copied to the hw command)
	 */
	*(void **)&cl[4] = storage;
	if (!storage) {
		qb_attr_code_encode(&code_pull_rls, cl, 0);
		return;
	}
	qb_attr_code_encode(&code_pull_rls, cl, 1);
	qb_attr_code_encode(&code_pull_stash, cl, !!stash);
	qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
}

/* Set the number of frames to pull (1..16); encoded as numframes - 1. */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
{
	uint32_t *cl = qb_cl(d);

	QBMAN_BUG_ON(!numframes || (numframes > 16));
	qb_attr_code_encode(&code_pull_numframes, cl,
			    (uint32_t)(numframes - 1));
}
728
/* Set the token echoed in the dequeue results.  Note qbman_swp_pull()
 * overwrites this field with portal index + 1.
 */
void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_token, cl, token);
}

/* Pull from a specific frame queue. */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_dct, cl, 1);
	qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
	qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
}

/* Pull from a work queue, with the given dequeue command type. */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
			    enum qbman_pull_type_e dct)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_dct, cl, dct);
	qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
	qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
}

/* Pull from a channel, with the given dequeue command type. */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
				 enum qbman_pull_type_e dct)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_dct, cl, dct);
	qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
	qb_attr_code_encode(&code_pull_dqsource, cl, chid);
}
764
/* Issue a volatile (pull) dequeue command built with the pull-desc
 * setters above.  Only one VDQCR may be outstanding per portal: the
 * vdq.busy atomic is taken here and returns -EBUSY if already held.
 * The verb word is written last, after lwsync(), with the VDQCR
 * valid-bit substituted in.
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	uint32_t *p;
	uint32_t *cl = qb_cl(d);

	if (!atomic_dec_and_test(&s->vdq.busy)) {
		atomic_inc(&s->vdq.busy);
		return -EBUSY;
	}
	/* Recover the virtual storage pointer squirrelled away by
	 * qbman_pull_desc_set_storage().
	 */
	s->vdq.storage = *(void **)&cl[4];
	/* We use portal index +1 as token so that 0 still indicates
	 * that the result isn't valid yet.
	 */
	qb_attr_code_encode(&code_pull_token, cl, s->desc.idx + 1);
	p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
	word_copy(&p[1], &cl[1], 3);
	/* Set the verb byte, have to substitute in the valid-bit */
	lwsync();
	p[0] = cl[0] | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
	return 0;
}
788
789 /****************/
790 /* Polling DQRR */
791 /****************/
792
/* Attribute codes describing DQRR entry fields; each QB_CODE appears to be
 * (32-bit word index, ls-bit offset, bit width) — matches how
 * qb_attr_code_decode() is used below (TODO confirm against qbman_portal.h).
 */
static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);

/* Result-type values carried in the "response verb" field of a result. */
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* DQPI register: low 4 bits hold the DQRR producer index. */
static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);
816
/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	uint32_t verb;
	uint32_t response_verb;
	uint32_t flags;
	const struct qbman_result *dq;
	const uint32_t *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/* We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
		uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;
		/* if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
			pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		qbman_cena_invalidate_prefetch(&s->sys,
				QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	}
	dq = qbman_cena_read_wo_shadow(&s->sys,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	p = qb_cl(dq);
	verb = qb_attr_code_decode(&code_dqrr_verb, p);
	/* If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
		return NULL;

	/* There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
		/* Wrapped the ring: the valid-bit polarity flips each lap. */
		s->dqrr.next_idx = 0;
		s->dqrr.valid_bit ^= QB_VALID_BIT;
	}
	/* If this is the final response to a volatile dequeue command
	 * indicate that the vdq is no longer busy.
	 */
	flags = qbman_result_DQ_flags(dq);
	response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & QBMAN_DQ_STAT_VOLATILE) &&
	    (flags & QBMAN_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.busy);

	return dq;
}
895
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
			    const struct qbman_result *dq)
{
	/* Write the entry's ring index to the DCAP register to acknowledge
	 * consumption and let hardware reuse the slot.
	 */
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
902
903 /*********************************/
904 /* Polling user-provided storage */
905 /*********************************/
906
/* Poll user-supplied result storage @dq for a newly DMA'd dequeue result.
 * Returns 0 when nothing has arrived yet; returns 1 exactly once per valid
 * entry, converting it to host endianness as a side effect.
 */
int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
				const struct qbman_result *dq)
{
	/* To avoid converting the little-endian DQ entry to host-endian prior
	 * to us knowing whether there is a valid entry or not (and run the
	 * risk of corrupting the incoming hardware LE write), we detect in
	 * hardware endianness rather than host. This means we need a different
	 * "code" depending on whether we are BE or LE in software, which is
	 * where DQRR_TOK_OFFSET comes in...
	 */
	static struct qb_attr_code code_dqrr_tok_detect =
					QB_CODE(0, DQRR_TOK_OFFSET, 8);
	/* The user trying to poll for a result treats "dq" as const. It is
	 * however the same address that was provided to us non-const in the
	 * first place, for directing hardware DMA to. So we can cast away the
	 * const because it is mutable from our perspective.
	 */
	uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
	uint32_t token;

	token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
	if (token == 0)
		return 0;
	/* Entry is valid - overwrite token back to 0 so
	 * a) If this memory is reused tokens will be 0
	 * b) If someone calls "has_new_result()" again on this entry it
	 *    will not appear to be new
	 */
	qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);

	/* Only now do we convert from hardware to host endianness. Also, as we
	 * are returning success, the user has promised not to call us again, so
	 * there's no risk of us converting the endianness twice...
	 */
	make_le32_n(p, 16);
	return 1;
}
944
/* Check whether the result in user-supplied storage @dq has been delivered.
 * Returns 0 if not yet valid, 1 once it is. The token stored by
 * qbman_swp_pull() (portal index + 1) identifies the issuing portal; when
 * @dq is that portal's VDQ storage, the portal's VDQ busy state is released
 * so another volatile dequeue may be issued.
 */
int qbman_check_command_complete(struct qbman_swp *s,
				 const struct qbman_result *dq)
{
	/* To avoid converting the little-endian DQ entry to host-endian prior
	 * to us knowing whether there is a valid entry or not (and run the
	 * risk of corrupting the incoming hardware LE write), we detect in
	 * hardware endianness rather than host. This means we need a different
	 * "code" depending on whether we are BE or LE in software, which is
	 * where DQRR_TOK_OFFSET comes in...
	 */
	static struct qb_attr_code code_dqrr_tok_detect =
					QB_CODE(0, DQRR_TOK_OFFSET, 8);
	/* The user trying to poll for a result treats "dq" as const. It is
	 * however the same address that was provided to us non-const in the
	 * first place, for directing hardware DMA to. So we can cast away the
	 * const because it is mutable from our perspective.
	 */
	uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
	uint32_t token;

	token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
	if (token == 0)
		return 0;
	/* TODO: Remove qbman_swp from parameters and make it a local
	 * once we've tested the reserve portal map change
	 */
	s = portal_idx_map[token - 1];
	/* When token is set it indicates that VDQ command has been fetched
	 * by qbman and is working on it. It is safe for software to issue
	 * another VDQ command, so incrementing the busy variable.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.busy);
	}
	return 1;
}
982
983 /********************************/
984 /* Categorising qbman results   */
985 /********************************/
986
/* Verb attribute code for results delivered to memory (rather than the
 * DQRR ring), where the verb field sits at a different offset.
 */
static struct qb_attr_code code_result_in_mem =
			QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);
989
990 static inline int __qbman_result_is_x(const struct qbman_result *dq,
991                                       uint32_t x)
992 {
993         const uint32_t *p = qb_cl(dq);
994         uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
995
996         return (response_verb == x);
997 }
998
999 static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
1000                                              uint32_t x)
1001 {
1002         const uint32_t *p = qb_cl(dq);
1003         uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);
1004
1005         return (response_verb == x);
1006 }
1007
/* True if @dq is a frame-dequeue result. */
int qbman_result_is_DQ(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}

/* True if @dq is a frame-queue data-availability notification. */
int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}

/* True if @dq is a channel data-availability notification. */
int qbman_result_is_CDAN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}

/* True if @dq is a congestion state-change notification, delivered either
 * to memory or to a work queue.
 */
int qbman_result_is_CSCN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
		__qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}

/* True if @dq is a buffer-pool state-change notification (memory only). */
int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
}

/* True if @dq is a congestion-group count update (memory only). */
int qbman_result_is_CGCU(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
}

/* True if @dq is a frame-queue retirement notification (memory only). */
int qbman_result_is_FQRN(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
}

/* True if @dq is a frame-queue retirement-immediate notification. */
int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
	return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
}

/* True if @dq is a frame-queue park notification. */
int qbman_result_is_FQPN(const struct qbman_result *dq)
{
	return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}
1053
1054 /*********************************/
1055 /* Parsing frame dequeue results */
1056 /*********************************/
1057
1058 /* These APIs assume qbman_result_is_DQ() is TRUE */
1059
/* STAT byte of dequeue result @dq; tested against the QBMAN_DQ_STAT_*
 * flags (e.g. VOLATILE/EXPIRED in qbman_swp_dqrr_next()).
 */
uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_stat, p);
}

/* 14-bit SEQNUM field of dequeue result @dq. */
uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
}

/* 16-bit ODPID field of dequeue result @dq. */
uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
}

/* 24-bit id of the frame queue the frame was dequeued from. */
uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_fqid, p);
}

/* Byte-count field of dequeue result @dq. */
uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_byte_count, p);
}

/* 24-bit frame-count field of dequeue result @dq. */
uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_frame_count, p);
}

/* 64-bit frame-queue context (fqd_ctx) carried in dequeue result @dq. */
uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
	const uint64_t *p = (const uint64_t *)qb_cl(dq);

	return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
}

/* Pointer to the frame descriptor embedded at 32-bit word 8 of @dq. */
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (const struct qbman_fd *)&p[8];
}
1115
1116 /**************************************/
1117 /* Parsing state-change notifications */
1118 /**************************************/
1119
/* Attribute codes for state-change notification (SCN) fields. The *_in_mem
 * variants apply when the notification was written to memory rather than
 * the DQRR ring, where the fields sit at different offsets.
 */
static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
static struct qb_attr_code code_scn_state_in_mem =
			QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
static struct qb_attr_code code_scn_rid_in_mem =
			QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);

/* State byte of notification @scn (DQRR-delivered layout). */
uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
}

/* 24-bit resource id of notification @scn (DQRR-delivered layout). */
uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return qb_attr_code_decode(&code_scn_rid, p);
}

/* 64-bit context of notification @scn, as stored (no endian conversion). */
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
	const uint64_t *p = (const uint64_t *)qb_cl(scn);

	return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
}

/* State byte of memory-delivered notification @scn. */
uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);

	return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
}

/* Resource id of memory-delivered notification @scn, converted from the
 * 24-bit little-endian on-wire form via make_le24().
 */
uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
{
	const uint32_t *p = qb_cl(scn);
	uint32_t result_rid;

	result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
	return make_le24(result_rid);
}
1164
1165 /*****************/
1166 /* Parsing BPSCN */
1167 /*****************/
/* Buffer pool id: the low 14 bits of the BPSCN resource id. */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
}

/* True when state bit 0 is clear — the pool still has free buffers. */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
}

/* True when state bit 1 is set — the pool is depleted. */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
}

/* True when state bit 2 is set — the pool is in surplus. */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
}

/* 64-bit BPSCN context, with each 32-bit half converted from LE. */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)make_le32(ctx_hi) << 32 |
		(uint64_t)make_le32(ctx_lo));
}
1199
1200 /*****************/
1201 /* Parsing CGCU  */
1202 /*****************/
/* Congestion group id: the low 16 bits of the CGCU resource id. */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
}

/* Instantaneous count: 40 bits assembled from the CGCU context — the low
 * 32 bits plus 8 bits from the upper half, each half converted from LE.
 */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
		(uint64_t)make_le32(ctx_lo);
}
1219
1220 /******************/
1221 /* Buffer release */
1222 /******************/
1223
/* Attribute codes for the buffer-release descriptor/command. */
/* These should be const, eventually */
/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
1229
1230 void qbman_release_desc_clear(struct qbman_release_desc *d)
1231 {
1232         uint32_t *cl;
1233
1234         memset(d, 0, sizeof(*d));
1235         cl = qb_cl(d);
1236         qb_attr_code_encode(&code_release_set_me, cl, 1);
1237 }
1238
1239 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
1240 {
1241         uint32_t *cl = qb_cl(d);
1242
1243         qb_attr_code_encode(&code_release_bpid, cl, bpid);
1244 }
1245
1246 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1247 {
1248         uint32_t *cl = qb_cl(d);
1249
1250         qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
1251 }
1252
/* Fields of the RAR register value read in qbman_swp_release(): the RCR
 * ring index to use, the valid-bit to apply, and whether an RCR entry was
 * actually granted.
 */
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
1256
/* Release @num_buffers (1..7) buffer addresses from @buffers to the pool
 * named in descriptor @d. Returns 0 on success, or -EBUSY when no RCR
 * entry is currently available.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const uint64_t *buffers, unsigned int num_buffers)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	/* Reading RAR reports the next RCR slot (index + valid-bit) and
	 * whether one was granted at all.
	 */
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	pr_debug("RAR=%08x\n", rar);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;
	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
	/* Start the release command */
	p = qbman_cena_write_start_wo_shadow(&s->sys,
					     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	/* Copy the caller's buffer pointers to the command */
	u64_to_le32_copy(&p[2], buffers, num_buffers);
	/* Set the verb byte, have to substitute in the valid-bit and the number
	 * of buffers. The lwsync() ensures the buffer pointers are visible
	 * before the verb makes the command live.
	 */
	lwsync();
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;
	qbman_cena_write_complete_wo_shadow(&s->sys,
					    QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	return 0;
}
1282
1283 /*******************/
1284 /* Buffer acquires */
1285 /*******************/
1286
/* These should be const, eventually */
/* Attribute codes for the buffer-acquire management command: pool id and
 * requested count in the request; r_num is the count in the response.
 */
static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);
1291
/* Acquire up to @num_buffers (1..7) buffers from pool @bpid into @buffers.
 * Returns the number actually acquired (which may be fewer than requested),
 * -EBUSY if the management-command interface is busy, or -EIO when hardware
 * reports a failure.
 */
int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
		      unsigned int num_buffers)
{
	uint32_t *p;
	uint32_t rslt, num;

	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_acquire_bpid, p, bpid);
	qb_attr_code_encode(&code_acquire_num, p, num_buffers);

	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	num = qb_attr_code_decode(&code_acquire_r_num, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) !=
		     QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
		       bpid, rslt);
		return -EIO;
	}
	QBMAN_BUG_ON(num > num_buffers);
	/* Copy the acquired buffers to the caller's array */
	u64_from_le32_copy(buffers, &p[2], num);
	return (int)num;
}
1330
1331 /*****************/
1332 /* FQ management */
1333 /*****************/
1334
/* FQID field of the frame-queue state-alteration command. */
static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);

/* Issue a frame-queue state-alteration management command on @fqid;
 * @alt_fq_verb selects the operation (schedule/force/XON/XOFF wrappers
 * below). Returns 0 on success, -EBUSY when the management-command
 * interface is busy, or -EIO on a hardware-reported failure.
 */
static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
				  uint8_t alt_fq_verb)
{
	uint32_t *p;
	uint32_t rslt;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
		       fqid, alt_fq_verb, rslt);
		return -EIO;
	}

	return 0;
}
1365
/* Move frame queue @fqid to the scheduled state. */
int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/* Force-schedule frame queue @fqid. */
int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/* Re-enable dequeuing from frame queue @fqid (XON). */
int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/* Disable dequeuing from frame queue @fqid (XOFF). */
int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
1385
1386 /**********************/
1387 /* Channel management */
1388 /**********************/
1389
/* Attribute codes for the WQ-channel (CDAN) configure command. */
static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* Configure channel-dequeue-available-notification (CDAN) state for
 * channel @channelid. @we_mask (CODE_CDAN_WE_*) selects which of the
 * enable flag and context fields are written. Returns 0, -EBUSY when the
 * management-command interface is busy, or -EIO on failure.
 */
static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
			      uint8_t we_mask, uint8_t cdan_en,
			      uint64_t ctx)
{
	uint32_t *p;
	uint32_t rslt;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_cdan_cid, p, channelid);
	qb_attr_code_encode(&code_cdan_we, p, we_mask);
	qb_attr_code_encode(&code_cdan_en, p, cdan_en);
	qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
					!= QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, rslt);
		return -EIO;
	}

	return 0;
}
1435
/* Set the notification context of channel @channelid without touching the
 * enable state.
 */
int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
			       uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

/* Enable CDAN generation for channel @channelid. */
int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

/* Disable CDAN generation for channel @channelid. */
int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

/* Set the notification context and enable CDAN in a single command. */
int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
				      uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
1465
/* Extract the DQRR ring index of entry @dqrr (via QBMAN_IDX_FROM_DQRR). */
uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}
1470
1471 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
1472 {
1473         struct qbman_result *dq;
1474
1475         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
1476         return dq;
1477 }
1478
1479 int qbman_swp_send_multiple(struct qbman_swp *s,
1480                             const struct qbman_eq_desc *d,
1481                             const struct qbman_fd *fd,
1482                             int frames_to_send)
1483 {
1484         uint32_t *p;
1485         const uint32_t *cl = qb_cl(d);
1486         uint32_t eqcr_ci;
1487         uint8_t diff;
1488         int sent = 0;
1489         int i;
1490         int initial_pi = s->eqcr.pi;
1491         uint64_t start_pointer;
1492
1493         if (!s->eqcr.available) {
1494                 eqcr_ci = s->eqcr.ci;
1495                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1496                                  QBMAN_CENA_SWP_EQCR_CI) & 0xF;
1497                 diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
1498                                    eqcr_ci, s->eqcr.ci);
1499                 if (!diff)
1500                         goto done;
1501                 s->eqcr.available += diff;
1502         }
1503
1504         /* we are trying to send frames_to_send,
1505          * if we have enough space in the ring
1506          */
1507         while (s->eqcr.available && frames_to_send--) {
1508                 p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
1509                                         QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
1510                 /* Write command (except of first byte) and FD */
1511                 memcpy(&p[1], &cl[1], 7 * 4);
1512                 memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));
1513
1514                 initial_pi++;
1515                 initial_pi &= 0xF;
1516                 s->eqcr.available--;
1517                 sent++;
1518         }
1519
1520 done:
1521         initial_pi =  s->eqcr.pi;
1522         lwsync();
1523
1524         /* in order for flushes to complete faster:
1525          * we use a following trick: we record all lines in 32 bit word
1526          */
1527
1528         initial_pi =  s->eqcr.pi;
1529         for (i = 0; i < sent; i++) {
1530                 p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
1531                                         QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
1532
1533                 p[0] = cl[0] | s->eqcr.pi_vb;
1534                 initial_pi++;
1535                 initial_pi &= 0xF;
1536
1537                 if (!(initial_pi & 7))
1538                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1539         }
1540
1541         initial_pi = s->eqcr.pi;
1542
1543         /* We need  to flush all the lines but without
1544          * load/store operations between them.
1545          * We assign start_pointer before we start loop so that
1546          * in loop we do not read it from memory
1547          */
1548         start_pointer = (uint64_t)s->sys.addr_cena;
1549         for (i = 0; i < sent; i++) {
1550                 p = (uint32_t *)(start_pointer
1551                                  + QBMAN_CENA_SWP_EQCR(initial_pi & 7));
1552                 dcbf((uint64_t)p);
1553                 initial_pi++;
1554                 initial_pi &= 0xF;
1555         }
1556
1557         /* Update producer index for the next call */
1558         s->eqcr.pi = initial_pi;
1559
1560         return sent;
1561 }
1562
/* Return the QMan hardware version recorded at portal initialization. */
int qbman_get_version(void)
{
	return qman_version;
}