4 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include "qbman_portal.h"
/* NOTE(review): this chunk carries stray leading line numbers embedded in
 * every source line and is missing lines (blank lines, braces, some
 * statements — the embedded numbering jumps). It cannot compile as-is and
 * should be regenerated from the original qbman_portal.c; text is preserved
 * byte-for-byte below, with review comments added.
 */
31 /* QBMan portal management command codes */
32 #define QBMAN_MC_ACQUIRE 0x30
33 #define QBMAN_WQCHAN_CONFIGURE 0x46
/* Offsets of registers in the cache-inhibited (CINH) area of a software
 * portal, as used by qbman_cinh_read()/qbman_cinh_write() below.
 */
35 /* CINH register offsets */
36 #define QBMAN_CINH_SWP_EQCR_PI 0x800
37 #define QBMAN_CINH_SWP_EQCR_CI 0x840
38 #define QBMAN_CINH_SWP_EQAR 0x8c0
39 #define QBMAN_CINH_SWP_DQPI 0xa00
40 #define QBMAN_CINH_SWP_DCAP 0xac0
41 #define QBMAN_CINH_SWP_SDQCR 0xb00
42 #define QBMAN_CINH_SWP_RAR 0xcc0
43 #define QBMAN_CINH_SWP_ISR 0xe00
44 #define QBMAN_CINH_SWP_IER 0xe40
45 #define QBMAN_CINH_SWP_ISDR 0xe80
46 #define QBMAN_CINH_SWP_IIR 0xec0
47 #define QBMAN_CINH_SWP_DQRR_ITR 0xa80
48 #define QBMAN_CINH_SWP_ITPR 0xf40
/* Offsets in the cache-enabled (CENA) area: each EQCR/DQRR/RCR ring entry
 * occupies one 64-byte cacheline (hence the << 6).
 */
50 /* CENA register offsets */
51 #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
52 #define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
53 #define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
54 #define QBMAN_CENA_SWP_CR 0x600
55 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
56 #define QBMAN_CENA_SWP_VDQCR 0x780
57 #define QBMAN_CENA_SWP_EQCR_CI 0x840
59 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
60 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
62 /* QBMan FQ management command codes */
63 #define QBMAN_FQ_SCHEDULE 0x48
64 #define QBMAN_FQ_FORCE 0x49
65 #define QBMAN_FQ_XON 0x4d
66 #define QBMAN_FQ_XOFF 0x4e
68 /*******************************/
69 /* Pre-defined attribute codes */
70 /*******************************/
/* QB_CODE(word, offset, width) — apparently a (32-bit word index, bit
 * offset, bit width) field descriptor consumed by qb_attr_code_encode()/
 * qb_attr_code_decode(); confirm against qbman_portal.h.
 */
72 struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
73 struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
75 /*************************/
76 /* SDQCR attribute codes */
77 /*************************/
79 /* we put these here because at least some of them are required by
82 struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
83 struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
84 struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
/* Width depends on DQRR size, so this code is filled in at run time by
 * qbman_swp_init() (2 bits for QMan < 4.1, 3 bits otherwise).
 */
85 static struct qb_attr_code code_eq_dca_idx;
86 #define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
/* NOTE(review): closing brace of this enum (and of qbman_sdqcr_fc below)
 * appears to be among the elided lines.
 */
87 enum qbman_sdqcr_dct {
88 qbman_sdqcr_dct_null = 0,
89 qbman_sdqcr_dct_prio_ics,
90 qbman_sdqcr_dct_active_ics,
91 qbman_sdqcr_dct_active
95 qbman_sdqcr_fc_one = 0,
96 qbman_sdqcr_fc_up_to_3 = 1
99 struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);
101 /* We need to keep track of which SWP triggered a pull command
102 * so keep an array of portal IDs and use the token field to
103 * be able to find the proper portal
105 #define MAX_QBMAN_PORTALS 35
/* Indexed by portal index; written by qbman_swp_init()/qbman_swp_finish()
 * and looked up via (token - 1) in qbman_check_command_complete().
 */
106 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
108 uint32_t qman_version;
110 /*********************************/
111 /* Portal constructor/destructor */
112 /*********************************/
114 /* Software portals should always be in the power-on state when we initialise,
115 * due to the CCSR-based portal reset functionality that MC has.
117 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
118 * valid-bits, so we need to support a workaround where we don't trust
119 * valid-bits when detecting new entries until any stale ring entries have been
120 * overwritten at least once. The idea is that we read PI for the first few
121 * entries, then switch to valid-bit after that. The trick is to clear the
122 * bug-work-around boolean once the PI wraps around the ring for the first time.
124 * Note: this still carries a slight additional cost once the decrementer hits
/* Allocates and initialises a software portal described by 'd': sets up the
 * management-command state machine, default SDQCR field values, VDQ/DQRR
 * tracking state (including the pre-4.1 DQRR reset-bug workaround), the
 * EQCR producer/consumer snapshot, and registers the portal in
 * portal_idx_map[]. Returns the new portal, or (presumably, in elided
 * error paths) NULL on failure.
 * NOTE(review): several lines of this body (braces, local declarations,
 * NULL checks, the 'else' of the version test, the error return) are
 * elided in this dump.
 */
127 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
131 struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
136 #ifdef QBMAN_CHECKING
137 p->mc.check = swp_mc_can_start;
139 p->mc.valid_bit = QB_VALID_BIT;
/* Pre-compute the SDQCR word: dequeue command type, frame count, token.
 * It is only written to hardware when a channel is enabled (see
 * qbman_swp_push_set()); the register itself is zeroed below.
 */
141 qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
142 qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
143 qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
144 atomic_set(&p->vdq.busy, 1);
145 p->vdq.valid_bit = QB_VALID_BIT;
146 p->dqrr.next_idx = 0;
147 p->dqrr.valid_bit = QB_VALID_BIT;
148 qman_version = p->desc.qman_version;
/* Pre-4.1 QMan: smaller DQRR and the valid-bit reset bug workaround. */
149 if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
150 p->dqrr.dqrr_size = 4;
151 p->dqrr.reset_bug = 1;
152 /* Set size of DQRR to 4, encoded in 2 bits */
153 code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2);
155 p->dqrr.dqrr_size = 8;
156 p->dqrr.reset_bug = 0;
157 /* Set size of DQRR to 8, encoded in 3 bits */
158 code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3);
161 ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
164 pr_err("qbman_swp_sys_init() failed %d\n", ret);
167 /* SDQCR needs to be initialized to 0 when no channels are
168 * being dequeued from or else the QMan HW will indicate an
169 * error. The values that were calculated above will be
170 * applied when dequeues from a specific channel are enabled
172 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
/* Snapshot the EQCR producer/consumer indices (4-bit counters) and derive
 * how many ring slots are currently free.
 */
173 eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
174 p->eqcr.pi = eqcr_pi & 0xF;
175 p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
176 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
177 p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
178 p->eqcr.ci, p->eqcr.pi);
180 portal_idx_map[p->desc.idx] = p;
/* Tears down a portal: releases the sys layer and removes the portal from
 * portal_idx_map[]. The kfree of 'p' is presumably in an elided line.
 */
184 void qbman_swp_finish(struct qbman_swp *p)
186 #ifdef QBMAN_CHECKING
187 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
189 qbman_swp_sys_finish(&p->sys);
190 portal_idx_map[p->desc.idx] = NULL;
/* Accessor for the descriptor the portal was built from; body (likely
 * 'return &p->desc;') is elided in this dump.
 */
194 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
/* Thin get/set accessors for the portal's cache-inhibited interrupt
 * registers: ISDR (status disable), ISR (status), DQRR_ITR (DQRR interrupt
 * threshold), ITPR (interrupt timeout period), IER (enable) and IIR
 * (inhibit). Writes to ISR clear the corresponding status bits.
 * NOTE(review): function braces are elided throughout this dump.
 */
203 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
205 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
208 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
210 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
213 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
215 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
218 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
220 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
223 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
225 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
228 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
230 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
233 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
235 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
238 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
240 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
243 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
245 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
248 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
250 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
253 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
255 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
/* Inhibit is all-or-nothing: any truthy 'inhibit' sets every IIR bit. */
258 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
260 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
263 /***********************/
264 /* Management commands */
265 /***********************/
268 * Internal code common to all types of management commands.
/* Begin a management command: returns a pointer into the CENA command
 * register area for the caller to fill in. Under QBMAN_CHECKING, a simple
 * start -> submit -> poll state machine guards against misuse.
 */
271 void *qbman_swp_mc_start(struct qbman_swp *p)
274 #ifdef QBMAN_CHECKING
275 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
277 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
278 #ifdef QBMAN_CHECKING
280 p->mc.check = swp_mc_can_submit;
/* Submit a previously started command: writes the verb byte last, with the
 * current valid-bit substituted in, so hardware only consumes a fully
 * written command.
 */
285 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
288 #ifdef QBMAN_CHECKING
/* NOTE(review): the '!(...)' inverts this assertion relative to its
 * siblings in mc_start (line 275) and mc_result (line 308) — as written it
 * fires when the state IS swp_mc_can_submit, i.e. in the legal case. Looks
 * like a bug; expected form is
 * QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit). Confirm against upstream.
 */
289 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
291 /* TBD: "|=" is going to hurt performance. Need to move as many fields
292 * out of word zero, and for those that remain, the "OR" needs to occur
293 * at the caller side. This debug check helps to catch cases where the
294 * caller wants to OR but has forgotten to do so.
296 QBMAN_BUG_ON((*v & cmd_verb) != *v);
297 *v = cmd_verb | p->mc.valid_bit;
298 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
299 #ifdef QBMAN_CHECKING
300 p->mc.check = swp_mc_can_poll;
/* Poll for the command's response in the RR slot selected by the current
 * valid-bit. The valid-bit is toggled once a response is consumed so the
 * next command uses the alternate RR slot. The "not ready yet -> return
 * NULL" path appears to be among the elided lines.
 */
304 void *qbman_swp_mc_result(struct qbman_swp *p)
307 #ifdef QBMAN_CHECKING
308 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
310 qbman_cena_invalidate_prefetch(&p->sys,
311 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
312 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
313 /* Remove the valid-bit - command completed if the rest is non-zero */
314 verb = ret[0] & ~QB_VALID_BIT;
317 #ifdef QBMAN_CHECKING
318 p->mc.check = swp_mc_can_start;
320 p->mc.valid_bit ^= QB_VALID_BIT;
/* Bit-field descriptors for the 8-word enqueue descriptor/command, and the
 * qbman_eq_desc_* builder functions that populate it. Word 0 holds the
 * command/ORP/DCA flags; later words hold target, QD and response fields.
 */
328 /* These should be const, eventually */
329 static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
330 static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
331 static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
332 static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
333 /* Can't set code_eq_dca_idx width. Need qman version. Read at runtime */
334 static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
335 static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
336 static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
337 static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
338 static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
339 static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
340 /* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
341 static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
342 static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
343 static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
344 static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
345 static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
346 static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
/* NOTE(review): the qbman_eq_cmd_empty enumerator (referenced by the
 * set_orp_hole/set_orp_nesn builders below) sits on an elided line.
 */
348 enum qbman_eq_cmd_e {
349 /* No enqueue, primarily for plugging ORP gaps for dropped frames */
351 /* DMA an enqueue response once complete */
352 qbman_eq_cmd_respond,
353 /* DMA an enqueue response only if the enqueue fails */
354 qbman_eq_cmd_respond_reject
/* Zero the whole descriptor before any set_* call. */
357 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
359 memset(d, 0, sizeof(*d));
/* Plain enqueue (no order restoration); response on completion or only on
 * rejection depending on 'respond_success'.
 */
362 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
364 uint32_t *cl = qb_cl(d);
366 qb_attr_code_encode(&code_eq_orp_en, cl, 0);
367 qb_attr_code_encode(&code_eq_cmd, cl,
368 respond_success ? qbman_eq_cmd_respond :
369 qbman_eq_cmd_respond_reject);
/* Order-restored enqueue against ORP 'opr_id' with sequence 'seqnum';
 * 'incomplete' sets NLIS (more fragments of this sequence to follow).
 */
372 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
373 uint32_t opr_id, uint32_t seqnum, int incomplete)
375 uint32_t *cl = qb_cl(d);
377 qb_attr_code_encode(&code_eq_orp_en, cl, 1);
378 qb_attr_code_encode(&code_eq_cmd, cl,
379 respond_success ? qbman_eq_cmd_respond :
380 qbman_eq_cmd_respond_reject);
381 qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
382 qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
383 qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
/* Plug an ORP gap for a dropped frame: empty command, NESN cleared. The
 * 'seqnum' parameter line is elided from the signature in this dump.
 */
386 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
389 uint32_t *cl = qb_cl(d);
391 qb_attr_code_encode(&code_eq_orp_en, cl, 1);
392 qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
393 qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
394 qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
395 qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
396 qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
/* Same as set_orp_hole but advances NESN (next expected sequence number). */
399 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
402 uint32_t *cl = qb_cl(d);
404 qb_attr_code_encode(&code_eq_orp_en, cl, 1);
405 qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
406 qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
407 qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
408 qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
409 qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
/* Set the DMA address (and stash hint) for the enqueue response. */
412 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
413 dma_addr_t storage_phys,
416 uint32_t *cl = qb_cl(d);
418 qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
419 qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
/* Caller-chosen token echoed back in the enqueue response. */
422 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
424 uint32_t *cl = qb_cl(d);
426 qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
/* Target a frame queue directly (queuing-destination mode disabled). */
429 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
431 uint32_t *cl = qb_cl(d);
433 qb_attr_code_encode(&code_eq_qd_en, cl, 0);
434 qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
/* Target a queuing destination: QD id plus bin and priority selectors. */
437 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
438 uint32_t qd_bin, uint32_t qd_prio)
440 uint32_t *cl = qb_cl(d);
442 qb_attr_code_encode(&code_eq_qd_en, cl, 1);
443 qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
444 qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
445 qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
/* Enqueue-dequeue interlock bit. */
448 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
450 uint32_t *cl = qb_cl(d);
452 qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
/* DCA: have hardware consume DQRR entry 'dqrr_idx' (optionally parking the
 * FQ) when this enqueue completes. code_eq_dca_idx width was fixed up at
 * portal init time.
 */
455 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
456 uint32_t dqrr_idx, int park)
458 uint32_t *cl = qb_cl(d);
460 qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
462 qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
463 qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
/* EQAR (enqueue-array-request) register field extractors: slot index,
 * valid-bit and success flag.
 */
467 #define EQAR_IDX(eqar) ((eqar) & 0x7)
468 #define EQAR_VB(eqar) ((eqar) & 0x80)
469 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
/* Array-mode enqueue: ask hardware for a free EQCR slot via EQAR, copy the
 * descriptor and frame descriptor into it, then publish by writing word 0
 * (verb + valid-bit) last. Returns non-zero (elided line, presumably
 * -EBUSY) when EQAR reports no slot.
 */
470 static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
471 const struct qbman_eq_desc *d,
472 const struct qbman_fd *fd)
475 const uint32_t *cl = qb_cl(d);
476 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
478 pr_debug("EQAR=%08x\n", eqar);
479 if (!EQAR_SUCCESS(eqar))
481 p = qbman_cena_write_start_wo_shadow(&s->sys,
482 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Words 1..7 first; word 0 (the verb) is written last so the entry only
 * becomes visible to hardware once complete.
 */
483 word_copy(&p[1], &cl[1], 7);
484 word_copy(&p[8], fd, sizeof(*fd) >> 2);
485 /* Set the verb byte, have to substitute in the valid-bit */
487 p[0] = cl[0] | EQAR_VB(eqar);
488 qbman_cena_write_complete_wo_shadow(&s->sys,
489 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Ring-mode enqueue: software manages the producer index. When the local
 * 'available' count hits zero, re-read the consumer index from CENA to
 * reclaim completed slots; the "still full -> fail" branch is elided.
 */
493 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
494 const struct qbman_eq_desc *d,
495 const struct qbman_fd *fd)
498 const uint32_t *cl = qb_cl(d);
502 if (!s->eqcr.available) {
503 eqcr_ci = s->eqcr.ci;
504 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
505 QBMAN_CENA_SWP_EQCR_CI) & 0xF;
506 diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
507 eqcr_ci, s->eqcr.ci);
508 s->eqcr.available += diff;
513 p = qbman_cena_write_start_wo_shadow(&s->sys,
514 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
515 word_copy(&p[1], &cl[1], 7);
516 word_copy(&p[8], fd, sizeof(*fd) >> 2);
518 /* Set the verb byte, have to substitute in the valid-bit */
519 p[0] = cl[0] | s->eqcr.pi_vb;
520 qbman_cena_write_complete_wo_shadow(&s->sys,
521 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
/* Toggle the valid-bit each time the 8-entry ring wraps. The pi increment
 * itself appears to be on an elided line.
 */
525 if (!(s->eqcr.pi & 7))
526 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Stage one enqueue entry into the EQCR without flushing it to hardware;
 * qbman_swp_flush_ring() pushes the staged cachelines out afterwards.
 * 'burst_index' is currently unused (see the commented-out "+burst_index"
 * below) — the slot is taken from s->eqcr.pi.
 */
530 int qbman_swp_fill_ring(struct qbman_swp *s,
531 const struct qbman_eq_desc *d,
532 const struct qbman_fd *fd,
533 __attribute__((unused)) uint8_t burst_index)
536 const uint32_t *cl = qb_cl(d);
/* Reclaim completed slots from the consumer index when we look full. */
540 if (!s->eqcr.available) {
541 eqcr_ci = s->eqcr.ci;
542 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
543 QBMAN_CENA_SWP_EQCR_CI) & 0xF;
544 diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
545 eqcr_ci, s->eqcr.ci);
546 s->eqcr.available += diff;
550 p = qbman_cena_write_start_wo_shadow(&s->sys,
551 QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
552 /* word_copy(&p[1], &cl[1], 7); */
553 memcpy(&p[1], &cl[1], 7 * 4);
554 /* word_copy(&p[8], fd, sizeof(*fd) >> 2); */
555 memcpy(&p[8], fd, sizeof(struct qbman_fd));
/* Verb + valid-bit written last, as in the other enqueue paths. */
558 p[0] = cl[0] | s->eqcr.pi_vb;
563 if (!(s->eqcr.pi & 7))
564 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Flush the staged EQCR cachelines (one per ring slot) to hardware.
 * NOTE(review): the flush of the first cacheline (offset 0x0) and the
 * return statement appear to be among the elided lines.
 */
569 int qbman_swp_flush_ring(struct qbman_swp *s)
571 void *ptr = s->sys.addr_cena;
574 dcbf((uint64_t)ptr + 0x40);
575 dcbf((uint64_t)ptr + 0x80);
576 dcbf((uint64_t)ptr + 0xc0);
577 dcbf((uint64_t)ptr + 0x100);
578 dcbf((uint64_t)ptr + 0x140);
579 dcbf((uint64_t)ptr + 0x180);
580 dcbf((uint64_t)ptr + 0x1c0);
/* Memory barrier helper; body (likely a dcbf/dmb sequence) is elided. */
585 void qbman_sync(void)
/* Public enqueue entry point: dispatch to array mode or (default) ring
 * mode depending on how the portal's EQCR was configured.
 */
590 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
591 const struct qbman_fd *fd)
593 if (s->sys.eqcr_mode == qman_eqcr_vb_array)
594 return qbman_swp_enqueue_array_mode(s, d, fd);
595 else /* Use ring mode by default */
596 return qbman_swp_enqueue_ring_mode(s, d, fd);
/* Burst enqueue: one eq descriptor per frame ('d' and 'fd' are parallel
 * arrays; the 'num_frames' parameter line is elided from the signature).
 * Three passes over the chosen slots: (1) copy words 1..7 + FD, (2) write
 * each verb byte with the valid-bit, (3) flush the cachelines, then
 * publish the new producer index. Returns the number actually enqueued.
 */
599 int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
600 const struct qbman_eq_desc *d,
601 const struct qbman_fd *fd,
605 const uint32_t *cl = qb_cl(d);
606 uint32_t eqcr_ci, eqcr_pi;
608 int i, num_enqueued = 0;
611 if (!s->eqcr.available) {
612 eqcr_ci = s->eqcr.ci;
613 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
614 QBMAN_CENA_SWP_EQCR_CI) & 0xF;
615 diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
616 eqcr_ci, s->eqcr.ci);
617 s->eqcr.available += diff;
622 eqcr_pi = s->eqcr.pi;
623 num_enqueued = (s->eqcr.available < num_frames) ?
624 s->eqcr.available : num_frames;
625 s->eqcr.available -= num_enqueued;
626 /* Fill in the EQCR ring */
627 for (i = 0; i < num_enqueued; i++) {
628 p = qbman_cena_write_start_wo_shadow(&s->sys,
629 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
630 memcpy(&p[1], &cl[1], 28);
631 memcpy(&p[8], &fd[i], sizeof(*fd));
634 /*Pointing to the next enqueue descriptor*/
635 cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
640 /* Set the verb byte, have to substitute in the valid-bit */
641 eqcr_pi = s->eqcr.pi;
/* Second pass: publish each entry; valid-bit flips on every ring wrap.
 * The reset of 'cl' back to the first descriptor between the passes is
 * presumably on an elided line.
 */
643 for (i = 0; i < num_enqueued; i++) {
644 p = qbman_cena_write_start_wo_shadow(&s->sys,
645 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
646 p[0] = cl[0] | s->eqcr.pi_vb;
650 s->eqcr.pi_vb ^= QB_VALID_BIT;
651 /*Pointing to the next enqueue descriptor*/
652 cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
655 /* Flush all the cacheline without load/store in between */
656 eqcr_pi = s->eqcr.pi;
657 addr_cena = (uint64_t)s->sys.addr_cena;
658 for (i = 0; i < num_enqueued; i++) {
659 dcbf((uint64_t *)(addr_cena +
660 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
664 s->eqcr.pi = eqcr_pi;
669 /*************************/
670 /* Static (push) dequeue */
671 /*************************/
/* Report whether push dequeuing is enabled for channel 'channel_idx' by
 * decoding the corresponding bit of the cached SDQCR word.
 */
673 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
675 struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
677 QBMAN_BUG_ON(channel_idx > 15);
678 *enabled = (int)qb_attr_code_decode(&code, &s->sdq);
/* Enable/disable push dequeuing for one channel, then write the SDQCR
 * register: the full cached word when any source remains enabled,
 * otherwise 0 (hardware flags an error on a non-zero SDQCR with no
 * sources). The if/else around the two writes is elided in this dump.
 */
681 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
684 struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
686 QBMAN_BUG_ON(channel_idx > 15);
687 qb_attr_code_encode(&code, &s->sdq, !!enable);
688 /* Read make the complete src map. If no channels are enabled
689 * the SDQCR must be 0 or else QMan will assert errors
691 dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
693 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
695 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
698 /***************************/
699 /* Volatile (pull) dequeue */
700 /***************************/
/* Bit-field descriptors for the VDQCR (volatile dequeue) command word. */
702 /* These should be const, eventually */
703 static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
704 static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
705 static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
706 static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
707 static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
708 static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
709 static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
710 static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
/* NOTE(review): the enum header and the qb_pull_dt_channel enumerator
 * (used by qbman_pull_desc_set_channel below) are on elided lines.
 */
714 qb_pull_dt_workqueue,
715 qb_pull_dt_framequeue
/* Zero the pull descriptor before any set_* call. */
718 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
720 memset(d, 0, sizeof(*d));
/* Record where dequeue results land: virtual pointer stashed in the two
 * descriptor words that are not copied to hardware, physical address in
 * the command itself. When 'storage' is NULL, results stay in the DQRR
 * (rls=0); the if/else and 'stash' parameter line are elided.
 */
723 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
724 struct qbman_result *storage,
725 dma_addr_t storage_phys,
728 uint32_t *cl = qb_cl(d);
729 /* Squiggle the pointer 'storage' into the extra 2 words of the
730 * descriptor (which aren't copied to the hw command)
732 *(void **)&cl[4] = storage;
734 qb_attr_code_encode(&code_pull_rls, cl, 0);
737 qb_attr_code_encode(&code_pull_rls, cl, 1);
738 qb_attr_code_encode(&code_pull_stash, cl, !!stash);
739 qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
/* 1..16 frames per pull; the field is encoded as numframes - 1. */
742 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
744 uint32_t *cl = qb_cl(d);
746 QBMAN_BUG_ON(!numframes || (numframes > 16));
747 qb_attr_code_encode(&code_pull_numframes, cl,
748 (uint32_t)(numframes - 1));
/* Caller-chosen token; note qbman_swp_pull() overwrites it with
 * portal-index + 1 (see line 800).
 */
751 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
753 uint32_t *cl = qb_cl(d);
755 qb_attr_code_encode(&code_pull_token, cl, token);
/* Pull from a specific frame queue. */
758 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
760 uint32_t *cl = qb_cl(d);
762 qb_attr_code_encode(&code_pull_dct, cl, 1);
763 qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
764 qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
/* Pull from a work queue with the given dequeue command type. */
767 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
768 enum qbman_pull_type_e dct)
770 uint32_t *cl = qb_cl(d);
772 qb_attr_code_encode(&code_pull_dct, cl, dct);
773 qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
774 qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
/* Pull from a channel with the given dequeue command type. */
777 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
778 enum qbman_pull_type_e dct)
780 uint32_t *cl = qb_cl(d);
782 qb_attr_code_encode(&code_pull_dct, cl, dct);
783 qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
784 qb_attr_code_encode(&code_pull_dqsource, cl, chid);
/* Issue the volatile dequeue. Only one VDQ may be outstanding per portal:
 * busy is taken via atomic_dec_and_test and restored (with an elided
 * -EBUSY return) if already held. The token is set to portal index + 1 so
 * a zero token means "result not written yet".
 */
787 int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
790 uint32_t *cl = qb_cl(d);
792 if (!atomic_dec_and_test(&s->vdq.busy)) {
793 atomic_inc(&s->vdq.busy);
796 s->vdq.storage = *(void **)&cl[4];
797 /* We use portal index +1 as token so that 0 still indicates
798 * that the result isn't valid yet.
800 qb_attr_code_encode(&code_pull_token, cl, s->desc.idx + 1);
801 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
802 word_copy(&p[1], &cl[1], 3);
803 /* Set the verb byte, have to substitute in the valid-bit */
805 p[0] = cl[0] | s->vdq.valid_bit;
806 s->vdq.valid_bit ^= QB_VALID_BIT;
807 qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
/* Bit-field descriptors for DQRR entries, plus the result-type verb codes
 * used by the categorisation helpers further down.
 */
815 static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
816 static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
817 static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
818 static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
819 static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
820 /* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
821 static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
822 static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
823 static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
824 static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);
826 #define QBMAN_RESULT_DQ 0x60
827 #define QBMAN_RESULT_FQRN 0x21
828 #define QBMAN_RESULT_FQRNI 0x22
829 #define QBMAN_RESULT_FQPN 0x24
830 #define QBMAN_RESULT_FQDAN 0x25
831 #define QBMAN_RESULT_CDAN 0x26
832 #define QBMAN_RESULT_CSCN_MEM 0x27
833 #define QBMAN_RESULT_CGCU 0x28
834 #define QBMAN_RESULT_BPSCN 0x29
835 #define QBMAN_RESULT_CSCN_WQ 0x2a
/* DQPI producer-index field (low 4 bits). */
837 static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);
839 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
840 * only once, so repeated calls can return a sequence of DQRR entries, without
841 * requiring they be consumed immediately or in any particular order.
843 const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
846 uint32_t response_verb;
848 const struct qbman_result *dq;
851 /* Before using valid-bit to detect if something is there, we have to
852 * handle the case of the DQRR reset bug...
/* Pre-4.1 workaround: detect new entries via the CINH producer index
 * instead of valid-bits until next_idx has walked the whole ring once.
 */
854 if (unlikely(s->dqrr.reset_bug)) {
855 /* We pick up new entries by cache-inhibited producer index,
856 * which means that a non-coherent mapping would require us to
857 * invalidate and read *only* once that PI has indicated that
858 * there's an entry here. The first trip around the DQRR ring
859 * will be much less efficient than all subsequent trips around
862 uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
863 uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
864 /* there are new entries if pi != next_idx */
865 if (pi == s->dqrr.next_idx)
867 /* if next_idx is/was the last ring index, and 'pi' is
868 * different, we can disable the workaround as all the ring
869 * entries have now been DMA'd to so valid-bit checking is
870 * repaired. Note: this logic needs to be based on next_idx
871 * (which increments one at a time), rather than on pi (which
872 * can burst and wrap-around between our snapshots of it).
874 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
875 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
876 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
877 s->dqrr.next_idx, pi);
878 s->dqrr.reset_bug = 0;
880 qbman_cena_invalidate_prefetch(&s->sys,
881 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
883 dq = qbman_cena_read_wo_shadow(&s->sys,
884 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
886 verb = qb_attr_code_decode(&code_dqrr_verb, p);
887 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
888 * in the DQRR reset bug workaround, we shouldn't need to skip these
889 * check, because we've already determined that a new entry is available
890 * and we've invalidated the cacheline before reading it, so the
891 * valid-bit behaviour is repaired and should tell us what we already
892 * knew from reading PI.
894 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
897 /* There's something there. Move "next_idx" attention to the next ring
898 * entry (and prefetch it) before returning what we found.
/* Wrap next_idx and flip the expected valid-bit at end of ring; the
 * next_idx increment itself is on an elided line.
 */
901 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
902 s->dqrr.next_idx = 0;
903 s->dqrr.valid_bit ^= QB_VALID_BIT;
905 /* If this is the final response to a volatile dequeue command
906 * indicate that the vdq is no longer busy.
908 flags = qbman_result_DQ_flags(dq);
909 response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
910 if ((response_verb == QBMAN_RESULT_DQ) &&
911 (flags & QBMAN_DQ_STAT_VOLATILE) &&
912 (flags & QBMAN_DQ_STAT_EXPIRED))
913 atomic_inc(&s->vdq.busy);
918 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
919 void qbman_swp_dqrr_consume(struct qbman_swp *s,
920 const struct qbman_result *dq)
922 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
925 /*********************************/
926 /* Polling user-provided storage */
927 /*********************************/
/* Poll a user-supplied storage entry (set up via
 * qbman_pull_desc_set_storage) for a new hardware-written result. The
 * token-is-zero early return and the endian conversion/final return are
 * among the elided lines.
 */
929 int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
930 const struct qbman_result *dq)
932 /* To avoid converting the little-endian DQ entry to host-endian prior
933 * to us knowing whether there is a valid entry or not (and run the
934 * risk of corrupting the incoming hardware LE write), we detect in
935 * hardware endianness rather than host. This means we need a different
936 * "code" depending on whether we are BE or LE in software, which is
937 * where DQRR_TOK_OFFSET comes in...
939 static struct qb_attr_code code_dqrr_tok_detect =
940 QB_CODE(0, DQRR_TOK_OFFSET, 8);
941 /* The user trying to poll for a result treats "dq" as const. It is
942 * however the same address that was provided to us non-const in the
943 * first place, for directing hardware DMA to. So we can cast away the
944 * const because it is mutable from our perspective.
946 uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
949 token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
952 /* Entry is valid - overwrite token back to 0 so
953 * a) If this memory is reused tokens will be 0
954 * b) If someone calls "has_new_result()" again on this entry it
955 * will not appear to be new
957 qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);
959 /* Only now do we convert from hardware to host endianness. Also, as we
960 * are returning success, the user has promised not to call us again, so
961 * there's no risk of us converting the endianness twice...
/* Like has_new_result(), but for management/VDQ completions: uses the
 * token (portal index + 1, see qbman_swp_pull) to look up the issuing
 * portal in portal_idx_map[] and releases its vdq.busy when the matching
 * storage entry completes.
 */
967 int qbman_check_command_complete(struct qbman_swp *s,
968 const struct qbman_result *dq)
970 /* To avoid converting the little-endian DQ entry to host-endian prior
971 * to us knowing whether there is a valid entry or not (and run the
972 * risk of corrupting the incoming hardware LE write), we detect in
973 * hardware endianness rather than host. This means we need a different
974 * "code" depending on whether we are BE or LE in software, which is
975 * where DQRR_TOK_OFFSET comes in...
977 static struct qb_attr_code code_dqrr_tok_detect =
978 QB_CODE(0, DQRR_TOK_OFFSET, 8);
979 /* The user trying to poll for a result treats "dq" as const. It is
980 * however the same address that was provided to us non-const in the
981 * first place, for directing hardware DMA to. So we can cast away the
982 * const because it is mutable from our perspective.
984 uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
987 token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
990 /* TODO: Remove qbman_swp from parameters and make it a local
991 * once we've tested the reserve portal map change
993 s = portal_idx_map[token - 1];
994 /* When token is set it indicates that VDQ command has been fetched
995 * by qbman and is working on it. It is safe for software to issue
996 * another VDQ command, so incrementing the busy variable.
998 if (s->vdq.storage == dq) {
999 s->vdq.storage = NULL;
1000 atomic_inc(&s->vdq.busy);
1005 /********************************/
1006 /* Categorising qbman results */
1007 /********************************/
/* Memory-written notifications carry the verb at a different bit offset
 * than DQRR entries, hence the separate code.
 */
1009 static struct qb_attr_code code_result_in_mem =
1010 QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);
/* True if the DQRR-resident entry's 7-bit response verb equals 'x'. The
 * 'uint32_t x' parameter line is elided from both helper signatures.
 */
1012 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1015 const uint32_t *p = qb_cl(dq);
1016 uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
1018 return (response_verb == x);
/* Same test, but for entries DMA'd to memory (different verb offset). */
1021 static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
1024 const uint32_t *p = qb_cl(dq);
1025 uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);
1027 return (response_verb == x);
/* Type predicates for each QBMAN_RESULT_* verb code defined above. */
1030 int qbman_result_is_DQ(const struct qbman_result *dq)
1032 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1035 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1037 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1040 int qbman_result_is_CDAN(const struct qbman_result *dq)
1042 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
/* CSCN may arrive either as a memory-written entry or via a WQ entry. */
1045 int qbman_result_is_CSCN(const struct qbman_result *dq)
1047 return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
1048 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1051 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1053 return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
1056 int qbman_result_is_CGCU(const struct qbman_result *dq)
1058 return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
1061 int qbman_result_is_FQRN(const struct qbman_result *dq)
1063 return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
1066 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1068 return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
1071 int qbman_result_is_FQPN(const struct qbman_result *dq)
1073 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1076 /*********************************/
1077 /* Parsing frame dequeue results */
1078 /*********************************/
1080 /* These APIs assume qbman_result_is_DQ() is TRUE */
/* Field extractors for a frame-dequeue result: STAT flags, ODP sequence
 * number, ODP id, FQID, and FQ byte/frame counts.
 */
1082 uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
1084 const uint32_t *p = qb_cl(dq);
1086 return qb_attr_code_decode(&code_dqrr_stat, p);
1089 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1091 const uint32_t *p = qb_cl(dq);
1093 return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
1096 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1098 const uint32_t *p = qb_cl(dq);
1100 return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
1103 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1105 const uint32_t *p = qb_cl(dq);
1107 return qb_attr_code_decode(&code_dqrr_fqid, p);
1110 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1112 const uint32_t *p = qb_cl(dq);
1114 return qb_attr_code_decode(&code_dqrr_byte_count, p);
1117 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1119 const uint32_t *p = qb_cl(dq);
1121 return qb_attr_code_decode(&code_dqrr_frame_count, p);
1124 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1126 const uint64_t *p = (const uint64_t *)qb_cl(dq);
1128 return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
1131 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1133 const uint32_t *p = qb_cl(dq);
1135 return (const struct qbman_fd *)&p[8];
1138 /**************************************/
1139 /* Parsing state-change notifications */
1140 /**************************************/
/* Field decoders for state-change notifications (SCN). WQ-delivered and
 * memory-delivered SCNs carry the state and resource-ID fields at different
 * offsets, hence the separate *_in_mem codes. */
static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
static struct qb_attr_code code_scn_state_in_mem =
			QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
static struct qb_attr_code code_scn_rid_in_mem =
			QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);
1150 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1152 const uint32_t *p = qb_cl(scn);
1154 return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
1157 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1159 const uint32_t *p = qb_cl(scn);
1161 return qb_attr_code_decode(&code_scn_rid, p);
1164 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1166 const uint64_t *p = (const uint64_t *)qb_cl(scn);
1168 return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
1171 uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
1173 const uint32_t *p = qb_cl(scn);
1175 return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
1178 uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
1180 const uint32_t *p = qb_cl(scn);
1181 uint32_t result_rid;
1183 result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
1184 return make_le24(result_rid);
/* Buffer-pool ID carried in a BPSCN: low 14 bits of the resource ID. */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
}

/* Non-zero if the pool has free buffers (state bit 0 clear). */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
}

/* Non-zero if the pool is depleted (state bit 1). */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
}

/* Non-zero if the pool is in surplus (state bit 2). */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
}

/* BPSCN context, with each 32-bit half converted from little-endian.
 * NOTE(review): the 'uint64_t ctx;' declaration was missing from this
 * extraction and has been restored — confirm against upstream. */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)make_le32(ctx_hi) << 32 |
		(uint64_t)make_le32(ctx_lo));
}
/* Congestion-group ID carried in a CGCU: low 16 bits of the resource ID. */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
}

/* 40-bit instantaneous count from a CGCU: low 8 bits of the upper word plus
 * the full lower word, both converted from little-endian.
 * NOTE(review): the 'uint64_t ctx;' declaration was missing from this
 * extraction and has been restored — confirm against upstream. */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
	uint64_t ctx;
	uint32_t ctx_hi, ctx_lo;

	ctx = qbman_result_SCN_ctx(scn);
	ctx_hi = upper32(ctx);
	ctx_lo = lower32(ctx);
	return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
		(uint64_t)make_le32(ctx_lo);
}
1242 /******************/
1243 /* Buffer release */
1244 /******************/
/* Field encoders for the buffer-release command descriptor. */
/* These should be const, eventually */
/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
1252 void qbman_release_desc_clear(struct qbman_release_desc *d)
1256 memset(d, 0, sizeof(*d));
1258 qb_attr_code_encode(&code_release_set_me, cl, 1);
1261 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
1263 uint32_t *cl = qb_cl(d);
1265 qb_attr_code_encode(&code_release_bpid, cl, bpid);
1268 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1270 uint32_t *cl = qb_cl(d);
1272 qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
/* Release Array Register (RAR) decode helpers: ring-slot index, valid bit and
 * the success flag reported by hardware. */
#define RAR_IDX(rar) ((rar) & 0x7)
#define RAR_VB(rar) ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/* Issue a buffer-release command: hand num_buffers (1..7) buffer pointers in
 * @buffers back to the pool selected by descriptor @d.
 * NOTE(review): this extraction appears truncated — the declaration of 'p',
 * the body of the !RAR_SUCCESS early-exit and the final return statement are
 * not visible here.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const uint64_t *buffers, unsigned int num_buffers)
	const uint32_t *cl = qb_cl(d);
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	pr_debug("RAR=%08x\n", rar);
	/* Hardware could not allocate a release-ring slot */
	if (!RAR_SUCCESS(rar))

	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
	/* Start the release command */
	p = qbman_cena_write_start_wo_shadow(&s->sys,
					     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	/* Copy the caller's buffer pointers to the command */
	u64_to_le32_copy(&p[2], buffers, num_buffers);
	/* Set the verb byte last; substitute in the valid-bit and the number
	 * of buffers so hardware only consumes a fully-written entry.
	 */
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;
	qbman_cena_write_complete_wo_shadow(&s->sys,
					    QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1305 /*******************/
1306 /* Buffer acquires */
1307 /*******************/
/* Field codes for the buffer-acquire management command: pool ID, requested
 * count, and the returned count (same 3-bit field in the response). */
/* These should be const, eventually */
static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);
/* Acquire up to 7 buffers from buffer pool @bpid via a management command;
 * acquired buffer addresses are copied into @buffers.
 * NOTE(review): this extraction appears truncated — the declarations of
 * 'p'/'rslt'/'num', any NULL-check after qbman_swp_mc_start(), the tails of
 * the QBMAN_BUG_ON and pr_err statements, and the return statements are not
 * visible here.
 */
int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
		      unsigned int num_buffers)
	/* Hardware supports at most 7 buffers per acquire command */
	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_acquire_bpid, p, bpid);
	qb_attr_code_encode(&code_acquire_num, p, num_buffers);

	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	num = qb_attr_code_decode(&code_acquire_r_num, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) !=

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",

	/* Hardware must not return more buffers than were requested */
	QBMAN_BUG_ON(num > num_buffers);
	/* Copy the acquired buffers to the caller's array */
	u64_from_le32_copy(buffers, &p[2], num);
/* FQID field encoder for the ALT_FQ management command. */
static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);

/* Common helper behind the "alter FQ state" commands (schedule/force/XON/
 * XOFF), selected by @alt_fq_verb.
 * NOTE(review): this extraction appears truncated — the declarations of
 * 'p'/'rslt', any NULL-check after qbman_swp_mc_start(), and the return
 * statements are not visible here.
 */
static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
				  uint8_t alt_fq_verb)
	/* Start the management command */
	p = qbman_swp_mc_start(s);

	qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
		       fqid, alt_fq_verb, rslt);
1388 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
1390 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
1393 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
1395 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
1398 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
1400 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
1403 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
1405 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
1408 /**********************/
1409 /* Channel management */
1410 /**********************/
/* Field encoders for the WQ-channel (CDAN) configure command: channel ID,
 * write-enable mask, enable bit and 64-bit context. */
static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so
 * it would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN 0x1
#define CODE_CDAN_WE_CTX 0x4
/* Configure CDAN settings for channel @channelid via a management command.
 * @we_mask selects which fields are written (CODE_CDAN_WE_EN and/or
 * CODE_CDAN_WE_CTX); @cdan_en and the context value supply the new settings.
 * NOTE(review): this extraction appears truncated — the final parameter line
 * (presumably 'uint64_t ctx'), the declarations of 'p'/'rslt', any NULL-check
 * after qbman_swp_mc_start(), and the return statements are not visible here.
 */
static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
			      uint8_t we_mask, uint8_t cdan_en,
	/* Start the management command */
	p = qbman_swp_mc_start(s);

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_cdan_cid, p, channelid);
	qb_attr_code_encode(&code_cdan_we, p, we_mask);
	qb_attr_code_encode(&code_cdan_en, p, cdan_en);
	qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);

	/* Decode the outcome */
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
		     != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("CDAN cQID %d failed: code = 0x%02x\n",
1458 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1461 return qbman_swp_CDAN_set(s, channelid,
1466 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
1468 return qbman_swp_CDAN_set(s, channelid,
1473 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
1475 return qbman_swp_CDAN_set(s, channelid,
1480 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1483 return qbman_swp_CDAN_set(s, channelid,
1484 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
1488 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
1490 return QBMAN_IDX_FROM_DQRR(dqrr);
1493 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
1495 struct qbman_result *dq;
1497 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
/* Enqueue multiple frames in one call: write as many EQCR entries as ring
 * space allows, then set all verb bytes (with valid-bits) and flush the cache
 * lines in a tight loop so hardware consumes the batch quickly.
 * NOTE(review): this extraction appears truncated — the frame-count
 * parameter, the declarations of 'p'/'eqcr_ci'/'diff'/'i'/'sent', several
 * loop-closing braces, the sent/pi increments and the final return (number
 * sent, presumably) are not visible here.
 */
int qbman_swp_send_multiple(struct qbman_swp *s,
			    const struct qbman_eq_desc *d,
			    const struct qbman_fd *fd,
	const uint32_t *cl = qb_cl(d);
	int initial_pi = s->eqcr.pi;
	uint64_t start_pointer;

	/* Out of known ring space: refresh the cached consumer index from
	 * hardware and credit back whatever was consumed since last read. */
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & 0xF;
		diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
				eqcr_ci, s->eqcr.ci);
		s->eqcr.available += diff;

	/* we are trying to send frames_to_send,
	 * if we have enough space in the ring
	 */
	while (s->eqcr.available && frames_to_send--) {
		p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
				QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
		/* Write command (except of first byte) and FD */
		memcpy(&p[1], &cl[1], 7 * 4);
		memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));
		s->eqcr.available--;

	initial_pi = s->eqcr.pi;

	/* In order for flushes to complete faster we use the following trick:
	 * all entries are filled first, then the verb bytes (which hand the
	 * entries to hardware) are written in a second pass.
	 */
	initial_pi = s->eqcr.pi;
	for (i = 0; i < sent; i++) {
		p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
				QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
		/* Writing the verb byte (with the current valid-bit) is what
		 * publishes the entry to hardware */
		p[0] = cl[0] | s->eqcr.pi_vb;
		/* Valid-bit toggles each time the 8-entry ring wraps */
		if (!(initial_pi & 7))
			s->eqcr.pi_vb ^= QB_VALID_BIT;

	initial_pi = s->eqcr.pi;

	/* We need to flush all the lines but without
	 * load/store operations between them.
	 * We assign start_pointer before we start loop so that
	 * in loop we do not read it from memory
	 */
	start_pointer = (uint64_t)s->sys.addr_cena;
	for (i = 0; i < sent; i++) {
		p = (uint32_t *)(start_pointer
			+ QBMAN_CENA_SWP_EQCR(initial_pi & 7));

	/* Update producer index for the next call */
	s->eqcr.pi = initial_pi;
1585 int qbman_get_version(void)
1587 return qman_version;