1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
7 #include "qbman_portal.h"
9 /* QBMan portal management command codes */
10 #define QBMAN_MC_ACQUIRE 0x30
11 #define QBMAN_WQCHAN_CONFIGURE 0x46
13 /* CINH register offsets */
14 #define QBMAN_CINH_SWP_EQCR_PI 0x800
15 #define QBMAN_CINH_SWP_EQCR_CI 0x840
16 #define QBMAN_CINH_SWP_EQAR 0x8c0
17 #define QBMAN_CINH_SWP_DQPI 0xa00
18 #define QBMAN_CINH_SWP_DCAP 0xac0
19 #define QBMAN_CINH_SWP_SDQCR 0xb00
20 #define QBMAN_CINH_SWP_RAR 0xcc0
21 #define QBMAN_CINH_SWP_ISR 0xe00
22 #define QBMAN_CINH_SWP_IER 0xe40
23 #define QBMAN_CINH_SWP_ISDR 0xe80
24 #define QBMAN_CINH_SWP_IIR 0xec0
25 #define QBMAN_CINH_SWP_DQRR_ITR 0xa80
26 #define QBMAN_CINH_SWP_ITPR 0xf40
28 /* CENA register offsets */
29 #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
30 #define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
31 #define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
32 #define QBMAN_CENA_SWP_CR 0x600
33 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
34 #define QBMAN_CENA_SWP_VDQCR 0x780
35 #define QBMAN_CENA_SWP_EQCR_CI 0x840
37 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
38 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
40 /* QBMan FQ management command codes */
41 #define QBMAN_FQ_SCHEDULE 0x48
42 #define QBMAN_FQ_FORCE 0x49
43 #define QBMAN_FQ_XON 0x4d
44 #define QBMAN_FQ_XOFF 0x4e
46 /*******************************/
47 /* Pre-defined attribute codes */
48 /*******************************/
50 #define QBMAN_RESPONSE_VERB_MASK 0x7f
52 /*************************/
53 /* SDQCR attribute codes */
54 /*************************/
55 #define QB_SDQCR_FC_SHIFT 29
56 #define QB_SDQCR_FC_MASK 0x1
57 #define QB_SDQCR_DCT_SHIFT 24
58 #define QB_SDQCR_DCT_MASK 0x3
59 #define QB_SDQCR_TOK_SHIFT 16
60 #define QB_SDQCR_TOK_MASK 0xff
61 #define QB_SDQCR_SRC_SHIFT 0
62 #define QB_SDQCR_SRC_MASK 0xffff
64 /* opaque token for static dequeues */
65 #define QMAN_SDQCR_TOKEN 0xbb
/* Dequeue-command type field of the SDQCR register. */
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

/* Frame-count limit applied to each static dequeue command. */
enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
79 /* We need to keep track of which SWP triggered a pull command
80 * so keep an array of portal IDs and use the token field to
81 * be able to find the proper portal
83 #define MAX_QBMAN_PORTALS 64
84 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
86 /*********************************/
87 /* Portal constructor/destructor */
88 /*********************************/
90 /* Software portals should always be in the power-on state when we initialise,
91 * due to the CCSR-based portal reset functionality that MC has.
93 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
94 * valid-bits, so we need to support a workaround where we don't trust
95 * valid-bits when detecting new entries until any stale ring entries have been
96 * overwritten at least once. The idea is that we read PI for the first few
97 * entries, then switch to valid-bit after that. The trick is to clear the
98 * bug-work-around boolean once the PI wraps around the ring for the first time.
100 * Note: this still carries a slight additional cost once the decrementer hits
103 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
107 struct qbman_swp *p = malloc(sizeof(*p));
112 #ifdef QBMAN_CHECKING
113 p->mc.check = swp_mc_can_start;
115 p->mc.valid_bit = QB_VALID_BIT;
117 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
118 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
119 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
121 atomic_set(&p->vdq.busy, 1);
122 p->vdq.valid_bit = QB_VALID_BIT;
123 p->dqrr.next_idx = 0;
124 p->dqrr.valid_bit = QB_VALID_BIT;
125 qman_version = p->desc.qman_version;
126 if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
127 p->dqrr.dqrr_size = 4;
128 p->dqrr.reset_bug = 1;
130 p->dqrr.dqrr_size = 8;
131 p->dqrr.reset_bug = 0;
134 ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
137 pr_err("qbman_swp_sys_init() failed %d\n", ret);
140 /* SDQCR needs to be initialized to 0 when no channels are
141 * being dequeued from or else the QMan HW will indicate an
142 * error. The values that were calculated above will be
143 * applied when dequeues from a specific channel are enabled.
145 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
146 eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
147 p->eqcr.pi = eqcr_pi & 0xF;
148 p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
149 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
150 p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
151 p->eqcr.ci, p->eqcr.pi);
153 portal_idx_map[p->desc.idx] = p;
157 void qbman_swp_finish(struct qbman_swp *p)
159 #ifdef QBMAN_CHECKING
160 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
162 qbman_swp_sys_finish(&p->sys);
163 portal_idx_map[p->desc.idx] = NULL;
167 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
176 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
178 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
181 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
183 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
186 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
188 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
191 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
193 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
196 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
198 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
201 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
203 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
206 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
208 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
211 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
213 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
216 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
218 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
221 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
223 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
226 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
228 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
231 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
233 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
236 /***********************/
237 /* Management commands */
238 /***********************/
241 * Internal code common to all types of management commands.
244 void *qbman_swp_mc_start(struct qbman_swp *p)
247 #ifdef QBMAN_CHECKING
248 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
250 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
251 #ifdef QBMAN_CHECKING
253 p->mc.check = swp_mc_can_submit;
258 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
261 #ifdef QBMAN_CHECKING
262 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
264 /* TBD: "|=" is going to hurt performance. Need to move as many fields
265 * out of word zero, and for those that remain, the "OR" needs to occur
266 * at the caller side. This debug check helps to catch cases where the
267 * caller wants to OR but has forgotten to do so.
269 QBMAN_BUG_ON((*v & cmd_verb) != *v);
270 *v = cmd_verb | p->mc.valid_bit;
271 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
272 #ifdef QBMAN_CHECKING
273 p->mc.check = swp_mc_can_poll;
277 void *qbman_swp_mc_result(struct qbman_swp *p)
280 #ifdef QBMAN_CHECKING
281 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
283 qbman_cena_invalidate_prefetch(&p->sys,
284 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
285 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
286 /* Remove the valid-bit - command completed if the rest is non-zero */
287 verb = ret[0] & ~QB_VALID_BIT;
290 #ifdef QBMAN_CHECKING
291 p->mc.check = swp_mc_can_start;
293 p->mc.valid_bit ^= QB_VALID_BIT;
301 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
/* Enqueue-command response-control codes (low bits of the EQ verb). */
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};
308 #define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
309 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
310 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
311 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
312 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
313 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
314 #define QB_ENQUEUE_CMD_NLIS_SHIFT 14
315 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
317 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
319 memset(d, 0, sizeof(*d));
322 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
324 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
326 d->eq.verb |= enqueue_response_always;
328 d->eq.verb |= enqueue_rejects_to_fq;
331 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
332 uint16_t opr_id, uint16_t seqnum, int incomplete)
334 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
336 d->eq.verb |= enqueue_response_always;
338 d->eq.verb |= enqueue_rejects_to_fq;
340 d->eq.orpid = opr_id;
341 d->eq.seqnum = seqnum;
343 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
345 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
348 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
351 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
352 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
353 d->eq.orpid = opr_id;
354 d->eq.seqnum = seqnum;
355 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
356 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
359 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
362 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
363 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
364 d->eq.orpid = opr_id;
365 d->eq.seqnum = seqnum;
366 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
367 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
370 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
371 dma_addr_t storage_phys,
374 d->eq.rsp_addr = storage_phys;
378 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
383 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
385 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
389 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
390 uint16_t qd_bin, uint8_t qd_prio)
392 d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
394 d->eq.qdbin = qd_bin;
395 d->eq.qpri = qd_prio;
398 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
401 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
403 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
406 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
407 uint8_t dqrr_idx, int park)
410 d->eq.dca = dqrr_idx;
412 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
414 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
415 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
417 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
421 #define EQAR_IDX(eqar) ((eqar) & 0x7)
422 #define EQAR_VB(eqar) ((eqar) & 0x80)
423 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
425 static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
426 const struct qbman_eq_desc *d,
427 const struct qbman_fd *fd)
430 const uint32_t *cl = qb_cl(d);
431 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
433 pr_debug("EQAR=%08x\n", eqar);
434 if (!EQAR_SUCCESS(eqar))
436 p = qbman_cena_write_start_wo_shadow(&s->sys,
437 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
438 memcpy(&p[1], &cl[1], 28);
439 memcpy(&p[8], fd, sizeof(*fd));
440 /* Set the verb byte, have to substitute in the valid-bit */
442 p[0] = cl[0] | EQAR_VB(eqar);
443 qbman_cena_write_complete_wo_shadow(&s->sys,
444 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
448 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
449 const struct qbman_eq_desc *d,
450 const struct qbman_fd *fd)
453 const uint32_t *cl = qb_cl(d);
457 if (!s->eqcr.available) {
458 eqcr_ci = s->eqcr.ci;
459 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
460 QBMAN_CENA_SWP_EQCR_CI) & 0xF;
461 diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
462 eqcr_ci, s->eqcr.ci);
463 s->eqcr.available += diff;
468 p = qbman_cena_write_start_wo_shadow(&s->sys,
469 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
470 memcpy(&p[1], &cl[1], 28);
471 memcpy(&p[8], fd, sizeof(*fd));
474 /* Set the verb byte, have to substitute in the valid-bit */
475 p[0] = cl[0] | s->eqcr.pi_vb;
476 qbman_cena_write_complete_wo_shadow(&s->sys,
477 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
481 if (!(s->eqcr.pi & 7))
482 s->eqcr.pi_vb ^= QB_VALID_BIT;
487 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
488 const struct qbman_fd *fd)
490 if (s->sys.eqcr_mode == qman_eqcr_vb_array)
491 return qbman_swp_enqueue_array_mode(s, d, fd);
492 else /* Use ring mode by default */
493 return qbman_swp_enqueue_ring_mode(s, d, fd);
496 int qbman_swp_enqueue_multiple(struct qbman_swp *s,
497 const struct qbman_eq_desc *d,
498 const struct qbman_fd *fd,
503 const uint32_t *cl = qb_cl(d);
504 uint32_t eqcr_ci, eqcr_pi;
506 int i, num_enqueued = 0;
509 if (!s->eqcr.available) {
510 eqcr_ci = s->eqcr.ci;
511 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
512 QBMAN_CENA_SWP_EQCR_CI) & 0xF;
513 diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
514 eqcr_ci, s->eqcr.ci);
515 s->eqcr.available += diff;
520 eqcr_pi = s->eqcr.pi;
521 num_enqueued = (s->eqcr.available < num_frames) ?
522 s->eqcr.available : num_frames;
523 s->eqcr.available -= num_enqueued;
524 /* Fill in the EQCR ring */
525 for (i = 0; i < num_enqueued; i++) {
526 p = qbman_cena_write_start_wo_shadow(&s->sys,
527 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
528 memcpy(&p[1], &cl[1], 28);
529 memcpy(&p[8], &fd[i], sizeof(*fd));
536 /* Set the verb byte, have to substitute in the valid-bit */
537 eqcr_pi = s->eqcr.pi;
538 for (i = 0; i < num_enqueued; i++) {
539 p = qbman_cena_write_start_wo_shadow(&s->sys,
540 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
541 p[0] = cl[0] | s->eqcr.pi_vb;
542 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
543 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
545 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
546 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
551 s->eqcr.pi_vb ^= QB_VALID_BIT;
554 /* Flush all the cacheline without load/store in between */
555 eqcr_pi = s->eqcr.pi;
556 addr_cena = (uint64_t)s->sys.addr_cena;
557 for (i = 0; i < num_enqueued; i++) {
558 dcbf((uint64_t *)(addr_cena +
559 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
563 s->eqcr.pi = eqcr_pi;
568 int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
569 const struct qbman_eq_desc *d,
570 const struct qbman_fd *fd,
575 uint32_t eqcr_ci, eqcr_pi;
577 int i, num_enqueued = 0;
580 if (!s->eqcr.available) {
581 eqcr_ci = s->eqcr.ci;
582 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
583 QBMAN_CENA_SWP_EQCR_CI) & 0xF;
584 diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
585 eqcr_ci, s->eqcr.ci);
586 s->eqcr.available += diff;
591 eqcr_pi = s->eqcr.pi;
592 num_enqueued = (s->eqcr.available < num_frames) ?
593 s->eqcr.available : num_frames;
594 s->eqcr.available -= num_enqueued;
595 /* Fill in the EQCR ring */
596 for (i = 0; i < num_enqueued; i++) {
597 p = qbman_cena_write_start_wo_shadow(&s->sys,
598 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
600 memcpy(&p[1], &cl[1], 28);
601 memcpy(&p[8], &fd[i], sizeof(*fd));
608 /* Set the verb byte, have to substitute in the valid-bit */
609 eqcr_pi = s->eqcr.pi;
610 for (i = 0; i < num_enqueued; i++) {
611 p = qbman_cena_write_start_wo_shadow(&s->sys,
612 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
614 p[0] = cl[0] | s->eqcr.pi_vb;
618 s->eqcr.pi_vb ^= QB_VALID_BIT;
621 /* Flush all the cacheline without load/store in between */
622 eqcr_pi = s->eqcr.pi;
623 addr_cena = (uint64_t)s->sys.addr_cena;
624 for (i = 0; i < num_enqueued; i++) {
625 dcbf((uint64_t *)(addr_cena +
626 QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
630 s->eqcr.pi = eqcr_pi;
635 /*************************/
636 /* Static (push) dequeue */
637 /*************************/
639 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
641 uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
643 QBMAN_BUG_ON(channel_idx > 15);
644 *enabled = src | (1 << channel_idx);
647 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
651 QBMAN_BUG_ON(channel_idx > 15);
653 s->sdq |= 1 << channel_idx;
655 s->sdq &= ~(1 << channel_idx);
657 /* Read make the complete src map. If no channels are enabled
658 * the SDQCR must be 0 or else QMan will assert errors
660 dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
662 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
664 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
667 /***************************/
668 /* Volatile (pull) dequeue */
669 /***************************/
671 /* These should be const, eventually */
672 #define QB_VDQCR_VERB_DCT_SHIFT 0
673 #define QB_VDQCR_VERB_DT_SHIFT 2
674 #define QB_VDQCR_VERB_RLS_SHIFT 4
675 #define QB_VDQCR_VERB_WAE_SHIFT 5
679 qb_pull_dt_workqueue,
680 qb_pull_dt_framequeue
683 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
685 memset(d, 0, sizeof(*d));
688 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
689 struct qbman_result *storage,
690 dma_addr_t storage_phys,
693 d->pull.rsp_addr_virt = (uint64_t)storage;
696 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
699 d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
701 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
703 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
705 d->pull.rsp_addr = storage_phys;
708 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
710 d->pull.numf = numframes - 1;
713 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
718 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
720 d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
721 d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
722 d->pull.dq_src = fqid;
725 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
726 enum qbman_pull_type_e dct)
728 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
729 d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
730 d->pull.dq_src = wqid;
733 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
734 enum qbman_pull_type_e dct)
736 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
737 d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
738 d->pull.dq_src = chid;
741 int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
744 uint32_t *cl = qb_cl(d);
746 if (!atomic_dec_and_test(&s->vdq.busy)) {
747 atomic_inc(&s->vdq.busy);
751 d->pull.tok = s->sys.idx + 1;
752 s->vdq.storage = (void *)d->pull.rsp_addr_virt;
753 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
754 memcpy(&p[1], &cl[1], 12);
756 /* Set the verb byte, have to substitute in the valid-bit */
758 p[0] = cl[0] | s->vdq.valid_bit;
759 s->vdq.valid_bit ^= QB_VALID_BIT;
760 qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
769 #define QMAN_DQRR_PI_MASK 0xf
771 #define QBMAN_RESULT_DQ 0x60
772 #define QBMAN_RESULT_FQRN 0x21
773 #define QBMAN_RESULT_FQRNI 0x22
774 #define QBMAN_RESULT_FQPN 0x24
775 #define QBMAN_RESULT_FQDAN 0x25
776 #define QBMAN_RESULT_CDAN 0x26
777 #define QBMAN_RESULT_CSCN_MEM 0x27
778 #define QBMAN_RESULT_CGCU 0x28
779 #define QBMAN_RESULT_BPSCN 0x29
780 #define QBMAN_RESULT_CSCN_WQ 0x2a
782 #include <rte_prefetch.h>
784 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
786 const struct qbman_result *p;
788 p = qbman_cena_read_wo_shadow(&s->sys,
789 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
793 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
794 * only once, so repeated calls can return a sequence of DQRR entries, without
795 * requiring they be consumed immediately or in any particular order.
797 const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
800 uint32_t response_verb;
802 const struct qbman_result *p;
804 /* Before using valid-bit to detect if something is there, we have to
805 * handle the case of the DQRR reset bug...
807 if (unlikely(s->dqrr.reset_bug)) {
808 /* We pick up new entries by cache-inhibited producer index,
809 * which means that a non-coherent mapping would require us to
810 * invalidate and read *only* once that PI has indicated that
811 * there's an entry here. The first trip around the DQRR ring
812 * will be much less efficient than all subsequent trips around
815 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
818 /* there are new entries if pi != next_idx */
819 if (pi == s->dqrr.next_idx)
822 /* if next_idx is/was the last ring index, and 'pi' is
823 * different, we can disable the workaround as all the ring
824 * entries have now been DMA'd to so valid-bit checking is
825 * repaired. Note: this logic needs to be based on next_idx
826 * (which increments one at a time), rather than on pi (which
827 * can burst and wrap-around between our snapshots of it).
829 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
830 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
831 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
832 s->dqrr.next_idx, pi);
833 s->dqrr.reset_bug = 0;
835 qbman_cena_invalidate_prefetch(&s->sys,
836 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
838 p = qbman_cena_read_wo_shadow(&s->sys,
839 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
842 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
843 * in the DQRR reset bug workaround, we shouldn't need to skip these
844 * check, because we've already determined that a new entry is available
845 * and we've invalidated the cacheline before reading it, so the
846 * valid-bit behaviour is repaired and should tell us what we already
847 * knew from reading PI.
849 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
852 /* There's something there. Move "next_idx" attention to the next ring
853 * entry (and prefetch it) before returning what we found.
856 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
857 s->dqrr.next_idx = 0;
858 s->dqrr.valid_bit ^= QB_VALID_BIT;
860 /* If this is the final response to a volatile dequeue command
861 * indicate that the vdq is no longer busy
864 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
865 if ((response_verb == QBMAN_RESULT_DQ) &&
866 (flags & QBMAN_DQ_STAT_VOLATILE) &&
867 (flags & QBMAN_DQ_STAT_EXPIRED))
868 atomic_inc(&s->vdq.busy);
873 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
874 void qbman_swp_dqrr_consume(struct qbman_swp *s,
875 const struct qbman_result *dq)
877 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
880 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
881 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
884 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
887 /*********************************/
888 /* Polling user-provided storage */
889 /*********************************/
890 int qbman_result_has_new_result(struct qbman_swp *s,
891 struct qbman_result *dq)
897 * Set token to be 0 so we will detect change back to 1
898 * next time the looping is traversed. Const is cast away here
899 * as we want users to treat the dequeue responses as read only.
901 ((struct qbman_result *)dq)->dq.tok = 0;
904 * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
905 * fact "VDQCR" shows busy doesn't mean that we hold the result that
906 * makes it available. Eg. we may be looking at our 10th dequeue result,
907 * having released VDQCR after the 1st result and it is now busy due to
908 * some other command!
910 if (s->vdq.storage == dq) {
911 s->vdq.storage = NULL;
912 atomic_inc(&s->vdq.busy);
918 int qbman_check_new_result(struct qbman_result *dq)
924 * Set token to be 0 so we will detect change back to 1
925 * next time the looping is traversed. Const is cast away here
926 * as we want users to treat the dequeue responses as read only.
928 ((struct qbman_result *)dq)->dq.tok = 0;
933 int qbman_check_command_complete(struct qbman_result *dq)
940 s = portal_idx_map[dq->dq.tok - 1];
942 * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
943 * fact "VDQCR" shows busy doesn't mean that we hold the result that
944 * makes it available. Eg. we may be looking at our 10th dequeue result,
945 * having released VDQCR after the 1st result and it is now busy due to
946 * some other command!
948 if (s->vdq.storage == dq) {
949 s->vdq.storage = NULL;
950 atomic_inc(&s->vdq.busy);
956 /********************************/
957 /* Categorising qbman results */
958 /********************************/
960 static inline int __qbman_result_is_x(const struct qbman_result *dq,
963 uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
965 return (response_verb == x);
968 int qbman_result_is_DQ(const struct qbman_result *dq)
970 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
973 int qbman_result_is_FQDAN(const struct qbman_result *dq)
975 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
978 int qbman_result_is_CDAN(const struct qbman_result *dq)
980 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
983 int qbman_result_is_CSCN(const struct qbman_result *dq)
985 return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
986 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
989 int qbman_result_is_BPSCN(const struct qbman_result *dq)
991 return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
994 int qbman_result_is_CGCU(const struct qbman_result *dq)
996 return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
999 int qbman_result_is_FQRN(const struct qbman_result *dq)
1001 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1004 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1006 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1009 int qbman_result_is_FQPN(const struct qbman_result *dq)
1011 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1014 /*********************************/
1015 /* Parsing frame dequeue results */
1016 /*********************************/
1018 /* These APIs assume qbman_result_is_DQ() is TRUE */
1020 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1025 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1027 return dq->dq.seqnum;
1030 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1032 return dq->dq.oprid;
1035 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1040 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1042 return dq->dq.fq_byte_cnt;
1045 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1047 return dq->dq.fq_frm_cnt;
1050 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1052 return dq->dq.fqd_ctx;
1055 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1057 return (const struct qbman_fd *)&dq->dq.fd[0];
1060 /**************************************/
1061 /* Parsing state-change notifications */
1062 /**************************************/
1063 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1065 return scn->scn.state;
1068 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1070 return scn->scn.rid_tok;
1073 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1075 return scn->scn.ctx;
1081 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1083 return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
1086 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1088 return !(int)(qbman_result_SCN_state(scn) & 0x1);
1091 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1093 return (int)(qbman_result_SCN_state(scn) & 0x2);
1096 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1098 return (int)(qbman_result_SCN_state(scn) & 0x4);
1101 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1103 return qbman_result_SCN_ctx(scn);
1109 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
1111 return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
1114 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
1116 return qbman_result_SCN_ctx(scn);
1119 /******************/
1120 /* Buffer release */
1121 /******************/
1122 #define QB_BR_RC_VALID_SHIFT 5
1123 #define QB_BR_RCDI_SHIFT 6
1125 void qbman_release_desc_clear(struct qbman_release_desc *d)
1127 memset(d, 0, sizeof(*d));
1128 d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
1131 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
1136 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1139 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
1141 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
1144 #define RAR_IDX(rar) ((rar) & 0x7)
1145 #define RAR_VB(rar) ((rar) & 0x80)
1146 #define RAR_SUCCESS(rar) ((rar) & 0x100)
1148 int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
1149 const uint64_t *buffers, unsigned int num_buffers)
1152 const uint32_t *cl = qb_cl(d);
1153 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1155 pr_debug("RAR=%08x\n", rar);
1156 if (!RAR_SUCCESS(rar))
1159 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1161 /* Start the release command */
1162 p = qbman_cena_write_start_wo_shadow(&s->sys,
1163 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1165 /* Copy the caller's buffer pointers to the command */
1166 u64_to_le32_copy(&p[2], buffers, num_buffers);
1168 /* Set the verb byte, have to substitute in the valid-bit and the number
1172 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1173 qbman_cena_write_complete_wo_shadow(&s->sys,
1174 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1179 /*******************/
1180 /* Buffer acquires */
1181 /*******************/
/* Buffer-acquire management command layout (one 64-byte CENA frame). */
struct qbman_acquire_desc {
	uint8_t verb;		/* command verb (QBMAN_MC_ACQUIRE) */
	uint8_t reserved;
	uint16_t bpid;		/* pool to acquire from */
	uint8_t num;		/* buffers requested, 1..7 */
	uint8_t reserved2[59];
};

/* Buffer-acquire management response layout. */
struct qbman_acquire_rslt {
	uint8_t verb;
	uint8_t rslt;		/* QBMAN_MC_RSLT_OK on success */
	uint16_t reserved;
	uint8_t num;		/* buffers actually acquired */
	uint8_t reserved2[3];
	uint64_t buf[7];	/* acquired buffer addresses */
};
1199 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1200 unsigned int num_buffers)
1202 struct qbman_acquire_desc *p;
1203 struct qbman_acquire_rslt *r;
1205 if (!num_buffers || (num_buffers > 7))
1208 /* Start the management command */
1209 p = qbman_swp_mc_start(s);
1214 /* Encode the caller-provided attributes */
1216 p->num = num_buffers;
1218 /* Complete the management command */
1219 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1221 pr_err("qbman: acquire from BPID %d failed, no response\n",
1226 /* Decode the outcome */
1227 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
1229 /* Determine success or failure */
1230 if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1231 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
1236 QBMAN_BUG_ON(r->num > num_buffers);
1238 /* Copy the acquired buffers to the caller's array */
1239 u64_from_le32_copy(buffers, &r->buf[0], r->num);
/* Alter-FQ-state management command layout (schedule/force/XON/XOFF). */
struct qbman_alt_fq_state_desc {
	uint8_t verb;		/* one of the QBMAN_FQ_* verbs */
	uint8_t reserved[3];
	uint32_t fqid;		/* frame queue, 24 significant bits */
	uint8_t reserved2[56];
};

/* Alter-FQ-state management response layout. */
struct qbman_alt_fq_state_rslt {
	uint8_t verb;
	uint8_t rslt;		/* QBMAN_MC_RSLT_OK on success */
	uint8_t reserved[62];
};
1260 #define ALT_FQ_FQID_MASK 0x00FFFFFF
1262 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
1263 uint8_t alt_fq_verb)
1265 struct qbman_alt_fq_state_desc *p;
1266 struct qbman_alt_fq_state_rslt *r;
1268 /* Start the management command */
1269 p = qbman_swp_mc_start(s);
1273 p->fqid = fqid & ALT_FQ_FQID_MASK;
1275 /* Complete the management command */
1276 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1278 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1283 /* Decode the outcome */
1284 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
1286 /* Determine success or failure */
1287 if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1288 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
1289 fqid, alt_fq_verb, r->rslt);
1296 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
1298 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
1301 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
1303 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
1306 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
1308 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
1311 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
1313 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
1316 /**********************/
1317 /* Channel management */
1318 /**********************/
/* WQ-channel (CDAN) configure management command layout. */
struct qbman_cdan_ctrl_desc {
	uint8_t verb;		/* QBMAN_WQCHAN_CONFIGURE */
	uint8_t reserved;
	uint16_t ch;		/* channel id */
	uint8_t we;		/* write-enable mask (CODE_CDAN_WE_*) */
	uint8_t ctrl;		/* CDAN enable flag */
	uint16_t reserved2;
	uint64_t cdan_ctx;	/* context echoed in CDAN notifications */
	uint8_t reserved3[48];
};

/* WQ-channel configure management response layout. */
struct qbman_cdan_ctrl_rslt {
	uint8_t verb;
	uint8_t rslt;		/* QBMAN_MC_RSLT_OK on success */
	uint16_t ch;
	uint8_t reserved[60];
};
1339 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
1340 * would be irresponsible to expose it.
1342 #define CODE_CDAN_WE_EN 0x1
1343 #define CODE_CDAN_WE_CTX 0x4
1345 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
1346 uint8_t we_mask, uint8_t cdan_en,
1349 struct qbman_cdan_ctrl_desc *p;
1350 struct qbman_cdan_ctrl_rslt *r;
1352 /* Start the management command */
1353 p = qbman_swp_mc_start(s);
1357 /* Encode the caller-provided attributes */
1366 /* Complete the management command */
1367 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1369 pr_err("qbman: wqchan config failed, no response\n");
1373 /* Decode the outcome */
1374 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
1375 != QBMAN_WQCHAN_CONFIGURE);
1377 /* Determine success or failure */
1378 if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1379 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
1380 channelid, r->rslt);
1387 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1390 return qbman_swp_CDAN_set(s, channelid,
1395 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
1397 return qbman_swp_CDAN_set(s, channelid,
1402 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
1404 return qbman_swp_CDAN_set(s, channelid,
1409 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1412 return qbman_swp_CDAN_set(s, channelid,
1413 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
1417 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
1419 return QBMAN_IDX_FROM_DQRR(dqrr);
1422 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
1424 struct qbman_result *dq;
1426 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));