1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
9 #include "qbman_portal.h"
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE 0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE 0x48
20 #define QBMAN_FQ_FORCE 0x49
21 #define QBMAN_FQ_XON 0x4d
22 #define QBMAN_FQ_XOFF 0x4e
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
28 #define QBMAN_RESPONSE_VERB_MASK 0x7f
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
33 #define QB_SDQCR_FC_SHIFT 29
34 #define QB_SDQCR_FC_MASK 0x1
35 #define QB_SDQCR_DCT_SHIFT 24
36 #define QB_SDQCR_DCT_MASK 0x3
37 #define QB_SDQCR_TOK_SHIFT 16
38 #define QB_SDQCR_TOK_MASK 0xff
39 #define QB_SDQCR_SRC_SHIFT 0
40 #define QB_SDQCR_SRC_MASK 0xffff
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN 0xbb
45 enum qbman_sdqcr_dct {
46 qbman_sdqcr_dct_null = 0,
47 qbman_sdqcr_dct_prio_ics,
48 qbman_sdqcr_dct_active_ics,
49 qbman_sdqcr_dct_active
53 qbman_sdqcr_fc_one = 0,
54 qbman_sdqcr_fc_up_to_3 = 1
57 /* We need to keep track of which SWP triggered a pull command
58 * so keep an array of portal IDs and use the token field to
59 * be able to find the proper portal
61 #define MAX_QBMAN_PORTALS 64
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
64 /* Internal Function declaration */
66 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
67 const struct qbman_eq_desc *d,
68 const struct qbman_fd *fd);
70 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
71 const struct qbman_eq_desc *d,
72 const struct qbman_fd *fd);
75 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
76 const struct qbman_eq_desc *d,
77 const struct qbman_fd *fd);
79 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
80 const struct qbman_eq_desc *d,
81 const struct qbman_fd *fd);
84 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
85 const struct qbman_eq_desc *d,
86 const struct qbman_fd *fd,
90 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
91 const struct qbman_eq_desc *d,
92 const struct qbman_fd *fd,
97 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
98 const struct qbman_eq_desc *d,
99 const struct qbman_fd *fd,
102 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
103 const struct qbman_eq_desc *d,
104 const struct qbman_fd *fd,
108 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
110 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
112 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
113 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
116 qbman_swp_release_direct(struct qbman_swp *s,
117 const struct qbman_release_desc *d,
118 const uint64_t *buffers, unsigned int num_buffers);
120 qbman_swp_release_mem_back(struct qbman_swp *s,
121 const struct qbman_release_desc *d,
122 const uint64_t *buffers, unsigned int num_buffers);
124 /* Function pointers */
125 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
126 const struct qbman_eq_desc *d,
127 const struct qbman_fd *fd)
128 = qbman_swp_enqueue_array_mode_direct;
130 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
131 const struct qbman_eq_desc *d,
132 const struct qbman_fd *fd)
133 = qbman_swp_enqueue_ring_mode_direct;
135 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
136 const struct qbman_eq_desc *d,
137 const struct qbman_fd *fd,
140 = qbman_swp_enqueue_multiple_direct;
142 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
143 const struct qbman_eq_desc *d,
144 const struct qbman_fd *fd,
146 = qbman_swp_enqueue_multiple_desc_direct;
148 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
149 struct qbman_pull_desc *d)
150 = qbman_swp_pull_direct;
152 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
153 = qbman_swp_dqrr_next_direct;
155 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
156 const struct qbman_release_desc *d,
157 const uint64_t *buffers, unsigned int num_buffers)
158 = qbman_swp_release_direct;
160 /*********************************/
161 /* Portal constructor/destructor */
162 /*********************************/
164 /* Software portals should always be in the power-on state when we initialise,
165 * due to the CCSR-based portal reset functionality that MC has.
167 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
168 * valid-bits, so we need to support a workaround where we don't trust
169 * valid-bits when detecting new entries until any stale ring entries have been
170 * overwritten at least once. The idea is that we read PI for the first few
171 * entries, then switch to valid-bit after that. The trick is to clear the
172 * bug-work-around boolean once the PI wraps around the ring for the first time.
174 * Note: this still carries a slight additional cost once the decrementer hits
/* Create and initialise a software portal object for the portal described by
 * 'd', registering it in portal_idx_map so pull-command tokens can be mapped
 * back to their issuing portal.
 *
 * NOTE(review): this excerpt is missing some lines (braces, local variable
 * declarations, and the error/return paths); comments below annotate only the
 * code that is visible here.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
    /* Portal state is heap-allocated and zeroed before configuration. */
    struct qbman_swp *p = malloc(sizeof(*p));
    memset(p, 0, sizeof(struct qbman_swp));
#ifdef QBMAN_CHECKING
    /* Management-command state machine starts out idle. */
    p->mc.check = swp_mc_can_start;
    p->mc.valid_bit = QB_VALID_BIT;
    /* Pre-compute the SDQCR word: priority-ICS dequeue type, up to 3 frames
     * per command, and an opaque token used to recognise our own entries.
     */
    p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
    p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
    p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
    /* QMan >= 5.0 with memory-backed ("fastest") CENA access also tracks a
     * valid bit for the memory-backed management-response ring.
     */
    if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
        && (d->cena_access_mode == qman_cena_fastest_access))
        p->mr.valid_bit = QB_VALID_BIT;
    /* vdq.busy == 1 here means "free": the VDQCR can accept a pull command. */
    atomic_set(&p->vdq.busy, 1);
    p->vdq.valid_bit = QB_VALID_BIT;
    p->dqrr.valid_bit = QB_VALID_BIT;
    qman_version = p->desc.qman_version;
    /* Pre-4.1 QMan: 4-deep DQRR plus the valid-bit reset erratum workaround
     * (see the block comment above this function); 4.1+ gets 8 entries.
     */
    if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
        p->dqrr.dqrr_size = 4;
        p->dqrr.reset_bug = 1;
        p->dqrr.dqrr_size = 8;
        p->dqrr.reset_bug = 0;
    ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
        pr_err("qbman_swp_sys_init() failed %d\n", ret);
    /* Verify that the DQRR PI is 0 - if it is not, the portal isn't in its
     * default (clean) state, which is an error.
     */
    if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
        pr_err("qbman DQRR PI is not zero, portal is not clean\n");
    /* SDQCR needs to be initialized to 0 when no channels are
     * being dequeued from or else the QMan HW will indicate an
     * error. The values that were calculated above will be
     * applied when dequeues from a specific channel are enabled.
     */
    qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
    p->eqcr.pi_ring_size = 8;
    /* QMan >= 5.0 memory-backed portals: larger EQCR ring, and swap the
     * datapath entry points over to the mem-back implementations.
     */
    if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
        && (d->cena_access_mode == qman_cena_fastest_access)) {
        p->eqcr.pi_ring_size = 32;
        qbman_swp_enqueue_array_mode_ptr =
            qbman_swp_enqueue_array_mode_mem_back;
        qbman_swp_enqueue_ring_mode_ptr =
            qbman_swp_enqueue_ring_mode_mem_back;
        qbman_swp_enqueue_multiple_ptr =
            qbman_swp_enqueue_multiple_mem_back;
        qbman_swp_enqueue_multiple_desc_ptr =
            qbman_swp_enqueue_multiple_desc_mem_back;
        qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
        qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
        qbman_swp_release_ptr = qbman_swp_release_mem_back;
    /* Build pi_ci_mask = 2*ring_size - 1: one bit per halving of the
     * (power-of-two) ring size, so the mask covers index plus wrap bit.
     */
    for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
        p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
    eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
    p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
    p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
    /* On >= 5.0 memory-backed portals the consumer index is deliberately
     * seeded from the EQCR_PI register; older portals read EQCR_CI.
     */
    if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
        && (d->cena_access_mode == qman_cena_fastest_access))
        p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
            & p->eqcr.pi_ci_mask;
        p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
            & p->eqcr.pi_ci_mask;
    p->eqcr.available = p->eqcr.pi_ring_size -
        qm_cyc_diff(p->eqcr.pi_ring_size,
        p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
        p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));
    /* Make the portal discoverable from its index/token. */
    portal_idx_map[p->desc.idx] = p;
/* Tear down a software portal: release the sys layer and unregister the
 * portal from portal_idx_map.  Must only be called when the management
 * command state machine is idle (asserted under QBMAN_CHECKING).
 */
void qbman_swp_finish(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
    QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
    qbman_swp_sys_finish(&p->sys);
    portal_idx_map[p->desc.idx] = NULL;
/* Return the descriptor this portal was initialised from. */
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)

/* The accessors below are thin wrappers around cache-inhibited register
 * reads/writes on the software portal.
 */

/* ISDR: interrupt status disable ("vanish") mask. */
uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
    return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
    qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);

/* ISR: raw interrupt status. */
uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
    return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);

/* Clear the status bits selected by 'mask' by writing them back to ISR. */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
    qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);

/* DQRR interrupt threshold register (DQRR_ITR). */
uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
    return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
    qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);

/* Interrupt timeout period register (ITPR). */
uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
    return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
    qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);

/* IER: interrupt enable ("trigger") mask. */
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
    return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
    qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);

/* IIR: global interrupt inhibit - all-or-nothing. */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
    return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
    qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
             inhibit ? 0xffffffff : 0);
354 /***********************/
355 /* Management commands */
356 /***********************/
359 * Internal code common to all types of management commands.
/* Begin a management command: returns the command buffer the caller fills
 * in before qbman_swp_mc_submit().  QMan >= 5.0 fastest-access portals use
 * the memory-backed CR; older/direct portals use the classic CENA CR.
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
    QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
    if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
        && (p->desc.cena_access_mode == qman_cena_fastest_access))
        ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
        ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
    /* Advance the debug state machine: next legal step is submit. */
    p->mc.check = swp_mc_can_submit;
380 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
383 #ifdef QBMAN_CHECKING
384 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
386 /* TBD: "|=" is going to hurt performance. Need to move as many fields
387 * out of word zero, and for those that remain, the "OR" needs to occur
388 * at the caller side. This debug check helps to catch cases where the
389 * caller wants to OR but has forgotten to do so.
391 QBMAN_BUG_ON((*v & cmd_verb) != *v);
392 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
393 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
394 *v = cmd_verb | p->mr.valid_bit;
395 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
397 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
400 *v = cmd_verb | p->mc.valid_bit;
401 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
404 #ifdef QBMAN_CHECKING
405 p->mc.check = swp_mc_can_poll;
/* Poll for the result of the most recently submitted management command.
 * The response buffer is returned once the hardware has completed the
 * command (the early-return/NULL paths are not visible in this excerpt).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
    QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
    if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
        && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
        /* Memory-backed response ring: completion is signalled by the
         * valid bit toggling away from the polarity we last saw.
         */
        ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
        /* Command completed if the valid bit is toggled */
        if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
        /* Remove the valid-bit -
         * command completed iff the rest is non-zero
         */
        verb = ret[0] & ~QB_VALID_BIT;
        p->mr.valid_bit ^= QB_VALID_BIT;
        /* Direct mode: invalidate/prefetch, then read the response slot
         * selected by the current valid bit.
         */
        qbman_cena_invalidate_prefetch(&p->sys,
                QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        ret = qbman_cena_read(&p->sys,
                QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        /* Remove the valid-bit -
         * command completed iff the rest is non-zero
         */
        verb = ret[0] & ~QB_VALID_BIT;
        p->mc.valid_bit ^= QB_VALID_BIT;
#ifdef QBMAN_CHECKING
    /* Command cycle complete: ready for the next mc_start(). */
    p->mc.check = swp_mc_can_start;
451 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
452 enum qb_enqueue_commands {
454 enqueue_response_always = 1,
455 enqueue_rejects_to_fq = 2
458 #define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
459 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
460 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
461 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
462 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
463 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
464 #define QB_ENQUEUE_CMD_NLIS_SHIFT 14
465 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
/* Reset the enqueue descriptor to a known-zero state before configuring. */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
    memset(d, 0, sizeof(*d));

/* Configure an enqueue with order restoration disabled.  respond_success
 * selects "response always" vs "rejects go to FQ" (branch lines not
 * visible in this excerpt).
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
    d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        d->eq.verb |= enqueue_response_always;
        d->eq.verb |= enqueue_rejects_to_fq;

/* Configure an order-restored enqueue carrying opr_id/seqnum; 'incomplete'
 * controls the NLIS (not-last-in-sequence) bit.
 */
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
               uint16_t opr_id, uint16_t seqnum, int incomplete)
    d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb |= enqueue_response_always;
        d->eq.verb |= enqueue_rejects_to_fq;
    d->eq.orpid = opr_id;
    d->eq.seqnum = seqnum;
        d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);

/* Advance the order-restoration point past a missing sequence number
 * (a "hole") without enqueueing a frame: clears NLIS and NESN.
 */
void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
    d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
    d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
    d->eq.orpid = opr_id;
    d->eq.seqnum = seqnum;
    d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
    d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);

/* As above but advances NESN (next expected sequence number): clears NLIS,
 * sets IS_NESN.
 */
void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
    d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
    d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
    d->eq.orpid = opr_id;
    d->eq.seqnum = seqnum;
    d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
    d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;

/* Attach the DMA address where the enqueue response should be written. */
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
                dma_addr_t storage_phys,
    d->eq.rsp_addr = storage_phys;

/* Token echoed back in the enqueue response, used to match responses to
 * requests (body not visible in this excerpt).
 */
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)

/* Target a frame queue directly (clears the "target is QD" verb bit). */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
    d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);

/* Target a queuing destination: bin and priority select the actual FQ. */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
              uint16_t qd_bin, uint8_t qd_prio)
    d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
    d->eq.qdbin = qd_bin;
    d->eq.qpri = qd_prio;

/* Enable/disable "interrupt on dispatch" for this enqueue. */
void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);

/* DCA (DQRR consume assist): when enabled, the enqueue also consumes DQRR
 * entry 'dqrr_idx'; 'park' additionally parks the FQ.
 */
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
               uint8_t dqrr_idx, int park)
        d->eq.dca = dqrr_idx;
            d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
            d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
        d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
        d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
571 #define EQAR_IDX(eqar) ((eqar) & 0x1f)
572 #define EQAR_VB(eqar) ((eqar) & 0x80)
573 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
/* Ring the per-entry EQCR "array mode" ready-to-send doorbell for EQCR
 * slot 'idx'.  NOTE(review): the parameter list and branch structure are
 * partly missing from this excerpt; the two writes appear to address the
 * first and second banks of EQCR_AM_RT registers - confirm against the
 * full source.
 */
static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
/* Array-mode enqueue (direct CENA access): allocate a free EQCR slot via
 * the EQAR register, copy descriptor + frame in, then publish by writing
 * the verb word with the slot's valid bit.
 */
static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                           const struct qbman_eq_desc *d,
                           const struct qbman_fd *fd)
    const uint32_t *cl = qb_cl(d);
    uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

    pr_debug("EQAR=%08x\n", eqar);
    /* No free EQCR entry could be allocated. */
    if (!EQAR_SUCCESS(eqar))
    p = qbman_cena_write_start_wo_shadow(&s->sys,
            QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
    /* Word 0 (the verb) is written last; copy words 1..7 and the frame. */
    memcpy(&p[1], &cl[1], 28);
    memcpy(&p[8], fd, sizeof(*fd));

    /* Set the verb byte, have to substitute in the valid-bit */
    p[0] = cl[0] | EQAR_VB(eqar);
    qbman_cena_write_complete_wo_shadow(&s->sys,
            QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Array-mode enqueue for memory-backed portals: same flow as the direct
 * variant, but completion is signalled through the EQCR_AM_RT doorbell
 * instead of a cacheline write-complete.
 */
static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                         const struct qbman_eq_desc *d,
                         const struct qbman_fd *fd)
    const uint32_t *cl = qb_cl(d);
    uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

    pr_debug("EQAR=%08x\n", eqar);
    /* No free EQCR entry could be allocated. */
    if (!EQAR_SUCCESS(eqar))
    p = qbman_cena_write_start_wo_shadow(&s->sys,
            QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
    memcpy(&p[1], &cl[1], 28);
    memcpy(&p[8], fd, sizeof(*fd));

    /* Set the verb byte, have to substitute in the valid-bit */
    p[0] = cl[0] | EQAR_VB(eqar);
    /* Ring the per-slot ready-to-send doorbell. */
    qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
/* Dispatch to the direct or mem-back array-mode implementation selected at
 * portal init time.
 */
static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                           const struct qbman_eq_desc *d,
                           const struct qbman_fd *fd)
    return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
/* Ring-mode enqueue (direct): when the cached 'available' count is
 * exhausted, refresh the consumer index from hardware; then write one
 * entry at the producer index and publish it via the valid bit.
 */
static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                          const struct qbman_eq_desc *d,
                          const struct qbman_fd *fd)
    const uint32_t *cl = qb_cl(d);
    uint32_t eqcr_ci, full_mask, half_mask;

    /* half_mask indexes the ring; full_mask (2*size-1) also keeps the
     * wrap bit used by qm_cyc_diff().
     */
    half_mask = (s->eqcr.pi_ci_mask>>1);
    full_mask = s->eqcr.pi_ci_mask;
    if (!s->eqcr.available) {
        eqcr_ci = s->eqcr.ci;
        s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
        s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                eqcr_ci, s->eqcr.ci);
        /* Ring still full: the enqueue fails (return not visible here). */
        if (!s->eqcr.available)

    p = qbman_cena_write_start_wo_shadow(&s->sys,
            QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
    memcpy(&p[1], &cl[1], 28);
    memcpy(&p[8], fd, sizeof(*fd));

    /* Set the verb byte, have to substitute in the valid-bit */
    p[0] = cl[0] | s->eqcr.pi_vb;
    qbman_cena_write_complete_wo_shadow(&s->sys,
            QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
    s->eqcr.pi &= full_mask;
    /* Toggle the valid bit each time the producer index wraps the ring. */
    if (!(s->eqcr.pi & half_mask))
        s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Ring-mode enqueue for memory-backed portals: the consumer index comes
 * from the mem-back CI mirror and the new producer index is rung through
 * the cache-inhibited EQCR_PI register.
 */
static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                        const struct qbman_eq_desc *d,
                        const struct qbman_fd *fd)
    const uint32_t *cl = qb_cl(d);
    uint32_t eqcr_ci, full_mask, half_mask;

    half_mask = (s->eqcr.pi_ci_mask>>1);
    full_mask = s->eqcr.pi_ci_mask;
    if (!s->eqcr.available) {
        eqcr_ci = s->eqcr.ci;
        s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
        s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                eqcr_ci, s->eqcr.ci);
        /* Ring still full: the enqueue fails (return not visible here). */
        if (!s->eqcr.available)

    p = qbman_cena_write_start_wo_shadow(&s->sys,
            QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
    memcpy(&p[1], &cl[1], 28);
    memcpy(&p[8], fd, sizeof(*fd));

    /* Set the verb byte, have to substitute in the valid-bit */
    p[0] = cl[0] | s->eqcr.pi_vb;
    s->eqcr.pi &= full_mask;
    /* Toggle the valid bit each time the producer index wraps the ring. */
    if (!(s->eqcr.pi & half_mask))
        s->eqcr.pi_vb ^= QB_VALID_BIT;

    /* Ring the doorbell: RT bit plus the new PI and valid bit. */
    qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
            (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Dispatch to the ring-mode implementation chosen at portal init. */
static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                       const struct qbman_eq_desc *d,
                       const struct qbman_fd *fd)
    return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);

/* Public single-frame enqueue: array mode if the portal's EQCR was
 * configured for it, otherwise ring mode.
 */
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
              const struct qbman_fd *fd)
    if (s->sys.eqcr_mode == qman_eqcr_vb_array)
        return qbman_swp_enqueue_array_mode(s, d, fd);
    else /* Use ring mode by default */
        return qbman_swp_enqueue_ring_mode(s, d, fd);
/* Enqueue up to num_frames frames sharing one descriptor (direct mode).
 * Returns the number actually enqueued, bounded by free EQCR space.
 * Entries are filled in a first pass, verbs are written in a second pass,
 * then all touched cachelines are flushed in one go.
 */
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                         const struct qbman_eq_desc *d,
                         const struct qbman_fd *fd,
    const uint32_t *cl = qb_cl(d);
    uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
    int i, num_enqueued = 0;

    half_mask = (s->eqcr.pi_ci_mask>>1);
    full_mask = s->eqcr.pi_ci_mask;
    if (!s->eqcr.available) {
        eqcr_ci = s->eqcr.ci;
        s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
        s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                eqcr_ci, s->eqcr.ci);
        /* No space at all: nothing can be enqueued. */
        if (!s->eqcr.available)

    eqcr_pi = s->eqcr.pi;
    num_enqueued = (s->eqcr.available < num_frames) ?
            s->eqcr.available : num_frames;
    s->eqcr.available -= num_enqueued;
    /* Fill in the EQCR ring */
    for (i = 0; i < num_enqueued; i++) {
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], &fd[i], sizeof(*fd));

    /* Set the verb byte, have to substitute in the valid-bit */
    eqcr_pi = s->eqcr.pi;
    for (i = 0; i < num_enqueued; i++) {
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
        p[0] = cl[0] | s->eqcr.pi_vb;
        /* Optional per-frame DQRR consume-assist (DCA) annotation. */
        if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
            struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

            d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                    ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
        /* Toggle the valid bit on every ring wrap. */
        if (!(eqcr_pi & half_mask))
            s->eqcr.pi_vb ^= QB_VALID_BIT;

    /* Flush all the cacheline without load/store in between */
    eqcr_pi = s->eqcr.pi;
    addr_cena = (size_t)s->sys.addr_cena;
    for (i = 0; i < num_enqueued; i++) {
        dcbf((uintptr_t)(addr_cena +
            QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));

    s->eqcr.pi = eqcr_pi & full_mask;
/* Multi-frame enqueue for memory-backed portals: entries are filled with
 * the verb written inline per entry, then EQCR_PI is rung once for the
 * whole batch.  Returns the number of frames enqueued.
 */
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                       const struct qbman_eq_desc *d,
                       const struct qbman_fd *fd,
    const uint32_t *cl = qb_cl(d);
    uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
    int i, num_enqueued = 0;

    half_mask = (s->eqcr.pi_ci_mask>>1);
    full_mask = s->eqcr.pi_ci_mask;
    if (!s->eqcr.available) {
        eqcr_ci = s->eqcr.ci;
        s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
        s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                eqcr_ci, s->eqcr.ci);
        /* No space at all: nothing can be enqueued. */
        if (!s->eqcr.available)

    eqcr_pi = s->eqcr.pi;
    num_enqueued = (s->eqcr.available < num_frames) ?
            s->eqcr.available : num_frames;
    s->eqcr.available -= num_enqueued;
    /* Fill in the EQCR ring */
    for (i = 0; i < num_enqueued; i++) {
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], &fd[i], sizeof(*fd));
        /* Optional per-frame DQRR consume-assist (DCA) annotation. */
        if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
            struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

            d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                    ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);

        p[0] = cl[0] | s->eqcr.pi_vb;
        /* Toggle the valid bit on every ring wrap. */
        if (!(eqcr_pi & half_mask))
            s->eqcr.pi_vb ^= QB_VALID_BIT;

    s->eqcr.pi = eqcr_pi & full_mask;

    /* Single doorbell for the whole batch: RT bit, new PI, valid bit. */
    qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
            (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Public multi-frame enqueue; dispatches via the pointer installed at
 * portal init (direct vs mem-back).
 */
inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                      const struct qbman_eq_desc *d,
                      const struct qbman_fd *fd,
    return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
/* Like qbman_swp_enqueue_multiple_direct(), but with one descriptor per
 * frame (d[i]).  NOTE(review): the per-iteration 'cl' assignment (from
 * &d[i]) is not visible in this excerpt - confirm against the full source.
 */
static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                          const struct qbman_eq_desc *d,
                          const struct qbman_fd *fd,
    uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
    int i, num_enqueued = 0;

    half_mask = (s->eqcr.pi_ci_mask>>1);
    full_mask = s->eqcr.pi_ci_mask;
    if (!s->eqcr.available) {
        eqcr_ci = s->eqcr.ci;
        s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
        s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                eqcr_ci, s->eqcr.ci);
        /* No space at all: nothing can be enqueued. */
        if (!s->eqcr.available)

    eqcr_pi = s->eqcr.pi;
    num_enqueued = (s->eqcr.available < num_frames) ?
            s->eqcr.available : num_frames;
    s->eqcr.available -= num_enqueued;
    /* Fill in the EQCR ring */
    for (i = 0; i < num_enqueued; i++) {
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], &fd[i], sizeof(*fd));

    /* Set the verb byte, have to substitute in the valid-bit */
    eqcr_pi = s->eqcr.pi;
    for (i = 0; i < num_enqueued; i++) {
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
        p[0] = cl[0] | s->eqcr.pi_vb;
        /* Toggle the valid bit on every ring wrap. */
        if (!(eqcr_pi & half_mask))
            s->eqcr.pi_vb ^= QB_VALID_BIT;

    /* Flush all the cacheline without load/store in between */
    eqcr_pi = s->eqcr.pi;
    addr_cena = (size_t)s->sys.addr_cena;
    for (i = 0; i < num_enqueued; i++) {
        dcbf((uintptr_t)(addr_cena +
            QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));

    s->eqcr.pi = eqcr_pi & full_mask;
/* Per-frame-descriptor batch enqueue for memory-backed portals; single
 * EQCR_PI doorbell at the end of the batch.  NOTE(review): the
 * per-iteration 'cl' assignment is not visible in this excerpt.
 */
static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                            const struct qbman_eq_desc *d,
                            const struct qbman_fd *fd,
    uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
    int i, num_enqueued = 0;

    half_mask = (s->eqcr.pi_ci_mask>>1);
    full_mask = s->eqcr.pi_ci_mask;
    if (!s->eqcr.available) {
        eqcr_ci = s->eqcr.ci;
        s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
        s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                eqcr_ci, s->eqcr.ci);
        /* No space at all: nothing can be enqueued. */
        if (!s->eqcr.available)

    eqcr_pi = s->eqcr.pi;
    num_enqueued = (s->eqcr.available < num_frames) ?
            s->eqcr.available : num_frames;
    s->eqcr.available -= num_enqueued;
    /* Fill in the EQCR ring */
    for (i = 0; i < num_enqueued; i++) {
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], &fd[i], sizeof(*fd));

    /* Set the verb byte, have to substitute in the valid-bit */
    eqcr_pi = s->eqcr.pi;
    for (i = 0; i < num_enqueued; i++) {
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
        p[0] = cl[0] | s->eqcr.pi_vb;
        /* Toggle the valid bit on every ring wrap. */
        if (!(eqcr_pi & half_mask))
            s->eqcr.pi_vb ^= QB_VALID_BIT;

    s->eqcr.pi = eqcr_pi & full_mask;

    /* Single doorbell for the whole batch. */
    qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
            (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Public per-descriptor batch enqueue; dispatches direct vs mem-back. */
inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
                       const struct qbman_eq_desc *d,
                       const struct qbman_fd *fd,
    return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
993 /*************************/
994 /* Static (push) dequeue */
995 /*************************/
997 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
999 uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1001 QBMAN_BUG_ON(channel_idx > 15);
1002 *enabled = src | (1 << channel_idx);
/* Enable or disable static (push) dequeue for channel_idx (0..15) and
 * propagate the resulting SDQCR to hardware.
 */
void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
    QBMAN_BUG_ON(channel_idx > 15);
        s->sdq |= 1 << channel_idx;
        s->sdq &= ~(1 << channel_idx);

    /* Read make the complete src map. If no channels are enabled
     * the SDQCR must be 0 or else QMan will assert errors
     */
    dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1025 /***************************/
1026 /* Volatile (pull) dequeue */
1027 /***************************/
1029 /* These should be const, eventually */
1030 #define QB_VDQCR_VERB_DCT_SHIFT 0
1031 #define QB_VDQCR_VERB_DT_SHIFT 2
1032 #define QB_VDQCR_VERB_RLS_SHIFT 4
1033 #define QB_VDQCR_VERB_WAE_SHIFT 5
1034 #define QB_VDQCR_VERB_RAD_SHIFT 6
1038 qb_pull_dt_workqueue,
1039 qb_pull_dt_framequeue
/* Reset the pull descriptor to a known-zero state before configuring. */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
    memset(d, 0, sizeof(*d));

/* Attach response storage for a pull: records both the virtual address
 * (kept for the driver) and the physical address (handed to hardware).
 * RLS is set when storage is provided; WAE controls write-allocate
 * stashing (branch lines not visible in this excerpt).
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                 struct qbman_result *storage,
                 dma_addr_t storage_phys,
    d->pull.rsp_addr_virt = (size_t)storage;
        /* No storage: dequeue responses are delivered via the DQRR. */
        d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
        d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
            d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
            d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
    d->pull.rsp_addr = storage_phys;

/* Hardware encodes the frame count as N-1. */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
    d->pull.numf = numframes - 1;

/* Token echoed back in each dequeue result, used for matching. */
void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
    d->pull.tok = token;

/* Pull from a specific frame queue. */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
    d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
    d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
    d->pull.dq_src = fqid;

/* Pull from a work queue using the given dequeue-command type. */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
                enum qbman_pull_type_e dct)
    d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
    d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
    d->pull.dq_src = wqid;

/* Pull from a channel using the given dequeue-command type. */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
                enum qbman_pull_type_e dct)
    d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
    d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
    d->pull.dq_src = chid;

/* RAD (reschedule-after-dequeue) is only meaningful when RLS is set;
 * otherwise warn and leave the descriptor unchanged.
 */
void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
    if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
            d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
            d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
        printf("The RAD feature is not valid when RLS = 0\n");
/* Issue a volatile (pull) dequeue command (direct mode).  Only one VDQCR
 * command may be outstanding per portal: vdq.busy is taken here and
 * released when the matching responses have been consumed.
 */
static int qbman_swp_pull_direct(struct qbman_swp *s,
                 struct qbman_pull_desc *d)
    uint32_t *cl = qb_cl(d);

    /* A pull is already outstanding: undo the decrement and bail out. */
    if (!atomic_dec_and_test(&s->vdq.busy)) {
        atomic_inc(&s->vdq.busy);

    /* Token identifies the issuing portal (see portal_idx_map); +1 keeps
     * it non-zero.
     */
    d->pull.tok = s->sys.idx + 1;
    s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
    p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
    memcpy(&p[1], &cl[1], 12);

    /* Set the verb byte, have to substitute in the valid-bit */
    p[0] = cl[0] | s->vdq.valid_bit;
    s->vdq.valid_bit ^= QB_VALID_BIT;
    qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
/* Volatile (pull) dequeue for memory-backed portals: the command is built
 * in VDQCR_MEM and rung via the cache-inhibited VDQCR_RT register.
 */
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
                   struct qbman_pull_desc *d)
    uint32_t *cl = qb_cl(d);

    /* A pull is already outstanding: undo the decrement and bail out. */
    if (!atomic_dec_and_test(&s->vdq.busy)) {
        atomic_inc(&s->vdq.busy);

    /* Token identifies the issuing portal (see portal_idx_map). */
    d->pull.tok = s->sys.idx + 1;
    s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
    p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
    memcpy(&p[1], &cl[1], 12);

    /* Set the verb byte, have to substitute in the valid-bit */
    p[0] = cl[0] | s->vdq.valid_bit;
    s->vdq.valid_bit ^= QB_VALID_BIT;

    qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
/* Public pull entry point; dispatches direct vs mem-back. */
inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
    return qbman_swp_pull_ptr(s, d);
1172 #define QMAN_DQRR_PI_MASK 0xf
1174 #define QBMAN_RESULT_DQ 0x60
1175 #define QBMAN_RESULT_FQRN 0x21
1176 #define QBMAN_RESULT_FQRNI 0x22
1177 #define QBMAN_RESULT_FQPN 0x24
1178 #define QBMAN_RESULT_FQDAN 0x25
1179 #define QBMAN_RESULT_CDAN 0x26
1180 #define QBMAN_RESULT_CSCN_MEM 0x27
1181 #define QBMAN_RESULT_CGCU 0x28
1182 #define QBMAN_RESULT_BPSCN 0x29
1183 #define QBMAN_RESULT_CSCN_WQ 0x2a
1185 #include <rte_prefetch.h>
/* Warm the cache for the next DQRR entry so a following
 * qbman_swp_dqrr_next() call is cheaper.  NOTE(review): the actual
 * prefetch call on 'p' is not visible in this excerpt.
 */
void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
    const struct qbman_result *p;

    p = qbman_cena_read_wo_shadow(&s->sys,
            QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 * Dispatches to the direct or mem-back implementation chosen at init.
 */
inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
    return qbman_swp_dqrr_next_ptr(s);
/* Fetch the next unconsumed DQRR entry via the cache-enabled (direct)
 * mapping, or NULL if nothing new is available. Handles the hardware
 * "DQRR reset bug" by polling the cache-inhibited producer index until
 * valid-bit semantics are known to be repaired.
 */
1205 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1208 uint32_t response_verb;
1210 const struct qbman_result *p;
1212 /* Before using valid-bit to detect if something is there, we have to
1213 * handle the case of the DQRR reset bug...
1215 if (s->dqrr.reset_bug) {
1216 /* We pick up new entries by cache-inhibited producer index,
1217 * which means that a non-coherent mapping would require us to
1218 * invalidate and read *only* once that PI has indicated that
1219 * there's an entry here. The first trip around the DQRR ring
1220 * will be much less efficient than all subsequent trips around
1223 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
1226 /* there are new entries if pi != next_idx */
1227 if (pi == s->dqrr.next_idx)
1230 /* if next_idx is/was the last ring index, and 'pi' is
1231 * different, we can disable the workaround as all the ring
1232 * entries have now been DMA'd to so valid-bit checking is
1233 * repaired. Note: this logic needs to be based on next_idx
1234 * (which increments one at a time), rather than on pi (which
1235 * can burst and wrap-around between our snapshots of it).
1237 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
1238 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
1239 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
1240 s->dqrr.next_idx, pi);
1241 s->dqrr.reset_bug = 0;
/* Invalidate before reading so a stale cacheline is never trusted. */
1243 qbman_cena_invalidate_prefetch(&s->sys,
1244 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1246 p = qbman_cena_read_wo_shadow(&s->sys,
1247 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1251 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1252 * in the DQRR reset bug workaround, we shouldn't need to skip these
1253 * check, because we've already determined that a new entry is available
1254 * and we've invalidated the cacheline before reading it, so the
1255 * valid-bit behaviour is repaired and should tell us what we already
1256 * knew from reading PI.
1258 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1261 /* There's something there. Move "next_idx" attention to the next ring
1262 * entry (and prefetch it) before returning what we found.
/* On ring wrap, reset the index and flip the expected valid-bit polarity. */
1265 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1266 s->dqrr.next_idx = 0;
1267 s->dqrr.valid_bit ^= QB_VALID_BIT;
1269 /* If this is the final response to a volatile dequeue command
1270 * indicate that the vdq is no longer busy
1273 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1274 if ((response_verb == QBMAN_RESULT_DQ) &&
1275 (flags & QBMAN_DQ_STAT_VOLATILE) &&
1276 (flags & QBMAN_DQ_STAT_EXPIRED))
/* Release the single-outstanding-VDQCR slot claimed by the pull. */
1277 atomic_inc(&s->vdq.busy);
/* Fetch the next unconsumed DQRR entry from the memory-backed mapping,
 * or NULL if nothing new. Same contract as the direct variant but no
 * reset-bug workaround or explicit cache invalidation is involved.
 */
1282 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1285 uint32_t response_verb;
1287 const struct qbman_result *p;
1289 p = qbman_cena_read_wo_shadow(&s->sys,
1290 QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
/* If the valid-bit isn't of the expected polarity, no new entry has
 * been produced at this ring slot yet.
 */
1301 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1304 /* There's something there. Move "next_idx" attention to the next ring
1305 * entry (and prefetch it) before returning what we found.
/* On ring wrap, reset the index and flip the expected valid-bit polarity. */
1308 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1309 s->dqrr.next_idx = 0;
1310 s->dqrr.valid_bit ^= QB_VALID_BIT;
1312 /* If this is the final response to a volatile dequeue command
1313 * indicate that the vdq is no longer busy
1316 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1317 if ((response_verb == QBMAN_RESULT_DQ)
1318 && (flags & QBMAN_DQ_STAT_VOLATILE)
1319 && (flags & QBMAN_DQ_STAT_EXPIRED))
/* Release the single-outstanding-VDQCR slot claimed by the pull. */
1320 atomic_inc(&s->vdq.busy);
1324 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1325 void qbman_swp_dqrr_consume(struct qbman_swp *s,
1326 const struct qbman_result *dq)
/* The DCAP register takes the ring index, recovered from the entry's
 * address via QBMAN_IDX_FROM_DQRR().
 */
1328 qbman_cinh_write(&s->sys,
1329 QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1332 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
/* Variant taking the ring index directly instead of the entry pointer. */
1333 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
1336 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
1339 /*********************************/
1340 /* Polling user-provided storage */
1341 /*********************************/
/* Poll a user-provided storage entry for a newly-DMA'd dequeue result.
 * The hardware writes a non-zero token when a response lands; a zero
 * token means nothing new. On success the token is cleared so the next
 * poll of the same slot does not re-report this result.
 */
1343 int qbman_result_has_new_result(struct qbman_swp *s,
1344 struct qbman_result *dq)
1346 if (dq->dq.tok == 0)
1350 * Set token to be 0 so we will detect change back to 1
1351 * next time the looping is traversed. Const is cast away here
1352 * as we want users to treat the dequeue responses as read only.
1354 ((struct qbman_result *)dq)->dq.tok = 0;
1357 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1358 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1359 * that makes it available. Eg. we may be looking at our 10th dequeue
1360 * result, having released VDQCR after the 1st result and it is now
1361 * busy due to some other command!
1363 if (s->vdq.storage == dq) {
1364 s->vdq.storage = NULL;
/* Free the portal's single VDQCR slot for the next volatile dequeue. */
1365 atomic_inc(&s->vdq.busy);
/* Portal-less variant of qbman_result_has_new_result(): checks and clears
 * the response token only, without touching any VDQCR bookkeeping.
 */
1371 int qbman_check_new_result(struct qbman_result *dq)
1373 if (dq->dq.tok == 0)
1377 * Set token to be 0 so we will detect change back to 1
1378 * next time the looping is traversed. Const is cast away here
1379 * as we want users to treat the dequeue responses as read only.
1381 ((struct qbman_result *)dq)->dq.tok = 0;
/* Check whether a previously issued command has completed into 'dq'.
 * The token written by hardware is (portal idx + 1), so it doubles as a
 * lookup key into portal_idx_map to find the issuing portal.
 * NOTE(review): the map lookup result is dereferenced without a NULL
 * check — relies on tokens only ever coming from registered portals.
 */
1386 int qbman_check_command_complete(struct qbman_result *dq)
1388 struct qbman_swp *s;
1390 if (dq->dq.tok == 0)
1393 s = portal_idx_map[dq->dq.tok - 1];
1395 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1396 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1397 * that makes it available. Eg. we may be looking at our 10th dequeue
1398 * result, having released VDQCR after the 1st result and it is now
1399 * busy due to some other command!
1401 if (s->vdq.storage == dq) {
1402 s->vdq.storage = NULL;
/* Free the portal's single VDQCR slot for the next volatile dequeue. */
1403 atomic_inc(&s->vdq.busy);
1409 /********************************/
1410 /* Categorising qbman results */
1411 /********************************/
/* True if the result's response verb (valid-bit masked off) equals x. */
1413 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1416 uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1418 return (response_verb == x);
/* True if this result is a frame-dequeue response. */
1421 int qbman_result_is_DQ(const struct qbman_result *dq)
1423 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
/* True if this result is a FQ data-availability notification. */
1426 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1428 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
/* True if this result is a channel data-availability notification. */
1431 int qbman_result_is_CDAN(const struct qbman_result *dq)
1433 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
/* True if this result is a congestion state-change notification
 * (either the memory-written or WQ-delivered form).
 */
1436 int qbman_result_is_CSCN(const struct qbman_result *dq)
1438 return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1439 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
/* True if this result is a buffer-pool state-change notification. */
1442 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1444 return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
/* True if this result is a congestion-group count update. */
1447 int qbman_result_is_CGCU(const struct qbman_result *dq)
1449 return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
/* True if this result is a FQ-retirement notification. */
1452 int qbman_result_is_FQRN(const struct qbman_result *dq)
1454 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
/* True if this result is an immediate FQ-retirement notification. */
1457 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1459 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
/* True if this result is a FQ-park notification. */
1462 int qbman_result_is_FQPN(const struct qbman_result *dq)
1464 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1467 /*********************************/
1468 /* Parsing frame dequeue results */
1469 /*********************************/
1471 /* These APIs assume qbman_result_is_DQ() is TRUE */
1473 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
/* Return the order-restoration sequence number of a dequeue result. */
1478 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1480 return dq->dq.seqnum;
/* Return the order-definition point ID (oprid field) of a dequeue result. */
1483 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1485 return dq->dq.oprid;
1488 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
/* Return the FQ byte count reported in a dequeue result. */
1493 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1495 return dq->dq.fq_byte_cnt;
/* Return the FQ frame count reported in a dequeue result. */
1498 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1500 return dq->dq.fq_frm_cnt;
/* Return the FQ context (fqd_ctx) carried in a dequeue result. */
1503 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1505 return dq->dq.fqd_ctx;
/* Return a pointer to the frame descriptor embedded in a dequeue result. */
1508 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1510 return (const struct qbman_fd *)&dq->dq.fd[0];
1513 /**************************************/
1514 /* Parsing state-change notifications */
1515 /**************************************/
/* Return the state byte of a state-change notification. */
1516 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1518 return scn->scn.state;
/* Return the resource ID / token word of a state-change notification. */
1521 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1523 return scn->scn.rid_tok;
/* Return the 64-bit context of a state-change notification. */
1526 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1528 return scn->scn.ctx;
/* Extract the buffer-pool ID (low 14 bits of the rid field) from a BPSCN. */
1534 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1536 return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
/* True if the pool has free buffers (state bit 0 clear) per this BPSCN. */
1539 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1541 return !(int)(qbman_result_SCN_state(scn) & 0x1);
/* True if the pool is depleted (state bit 1 set) per this BPSCN. */
1544 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1546 return (int)(qbman_result_SCN_state(scn) & 0x2);
/* True if the pool is in surplus (state bit 2 set) per this BPSCN. */
1549 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1551 return (int)(qbman_result_SCN_state(scn) & 0x4);
/* Return the context associated with a buffer-pool SCN. */
1554 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1556 return qbman_result_SCN_ctx(scn);
/* Extract the congestion-group ID from a CGCU notification.
 * (The 0xFFFF mask is redundant after the uint16_t cast; kept as-is.)
 */
1562 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
1564 return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
/* Return the instantaneous count carried in a CGCU (reuses the SCN ctx
 * field layout).
 */
1567 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
1569 return qbman_result_SCN_ctx(scn);
1572 /********************/
1573 /* Parsing EQ RESP */
1574 /********************/
/* Return a pointer to the frame descriptor inside an enqueue response. */
1575 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
1577 return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
/* Set the response ID field of an enqueue response. */
1580 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
1582 eqresp->eq_resp.rspid = val;
/* Return the response ID field of an enqueue response. */
1585 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
1587 return eqresp->eq_resp.rspid;
/* Return the enqueue-response status, treating rc == 0xE as success.
 * NOTE(review): the two return statements fall outside the lines shown
 * here — confirm the exact success/failure values against the full source.
 */
1590 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
1592 if (eqresp->eq_resp.rc == 0xE)
1598 /******************/
1599 /* Buffer release */
1600 /******************/
1601 #define QB_BR_RC_VALID_SHIFT 5
1602 #define QB_BR_RCDI_SHIFT 6
/* Reset a buffer-release descriptor to defaults: all-zero with the
 * RC-valid bit set in the verb.
 */
1604 void qbman_release_desc_clear(struct qbman_release_desc *d)
1606 memset(d, 0, sizeof(*d));
1607 d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
1610 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
/* Enable/disable "release command dispatch immediate" by setting or
 * clearing the RCDI bit in the descriptor verb.
 */
1615 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1618 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
1620 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
1623 #define RAR_IDX(rar) ((rar) & 0x7)
1624 #define RAR_VB(rar) ((rar) & 0x80)
1625 #define RAR_SUCCESS(rar) ((rar) & 0x100)
/* Release 1..7 buffers back to a pool via the cache-enabled (direct) RCR
 * ring. Reads the RAR register to claim a ring slot; fails if no slot is
 * available.
 * NOTE(review): the busy-return and success-return fall outside the lines
 * shown here.
 */
1627 static int qbman_swp_release_direct(struct qbman_swp *s,
1628 const struct qbman_release_desc *d,
1629 const uint64_t *buffers,
1630 unsigned int num_buffers)
1633 const uint32_t *cl = qb_cl(d);
/* RAR read both allocates a ring slot (index + valid-bit) and reports
 * whether one was available.
 */
1634 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1636 pr_debug("RAR=%08x\n", rar);
1637 if (!RAR_SUCCESS(rar))
/* Hardware limit: at most 7 buffers per release command. */
1640 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1642 /* Start the release command */
1643 p = qbman_cena_write_start_wo_shadow(&s->sys,
1644 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1646 /* Copy the caller's buffer pointers to the command */
1647 u64_to_le32_copy(&p[2], buffers, num_buffers);
1649 /* Set the verb byte, have to substitute in the valid-bit and the
1650 * number of buffers.
1653 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1654 qbman_cena_write_complete_wo_shadow(&s->sys,
1655 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
/* Release 1..7 buffers back to a pool via the memory-backed RCR ring.
 * Same flow as the direct variant, but the command is committed through
 * the per-slot RT doorbell register instead of a cacheline flush.
 * NOTE(review): the busy-return and success-return fall outside the lines
 * shown here.
 */
1660 static int qbman_swp_release_mem_back(struct qbman_swp *s,
1661 const struct qbman_release_desc *d,
1662 const uint64_t *buffers,
1663 unsigned int num_buffers)
1666 const uint32_t *cl = qb_cl(d);
/* RAR read both allocates a ring slot (index + valid-bit) and reports
 * whether one was available.
 */
1667 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1669 pr_debug("RAR=%08x\n", rar);
1670 if (!RAR_SUCCESS(rar))
/* Hardware limit: at most 7 buffers per release command. */
1673 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1675 /* Start the release command */
1676 p = qbman_cena_write_start_wo_shadow(&s->sys,
1677 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1679 /* Copy the caller's buffer pointers to the command */
1680 u64_to_le32_copy(&p[2], buffers, num_buffers);
1682 /* Set the verb byte, have to substitute in the valid-bit and the
1683 * number of buffers.
1685 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
/* Ring the per-slot RT doorbell (registers are 4 bytes apart). */
1687 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
1688 RAR_IDX(rar) * 4, QMAN_RT_MODE);
/* Public entry point for buffer release: dispatches through the function
 * pointer selected at portal init (direct vs memory-backed).
 */
1693 inline int qbman_swp_release(struct qbman_swp *s,
1694 const struct qbman_release_desc *d,
1695 const uint64_t *buffers,
1696 unsigned int num_buffers)
1698 return qbman_swp_release_ptr(s, d, buffers, num_buffers);
1701 /*******************/
1702 /* Buffer acquires */
1703 /*******************/
1704 struct qbman_acquire_desc {
1709 uint8_t reserved2[59];
1712 struct qbman_acquire_rslt {
1717 uint8_t reserved2[3];
/* Acquire up to 7 buffers from buffer pool 'bpid' via a management
 * command, copying the acquired pointers into 'buffers'.
 * NOTE(review): the error-path and final return statements fall outside
 * the lines shown here; upstream this returns the number acquired on
 * success — confirm against the full source.
 */
1721 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1722 unsigned int num_buffers)
1724 struct qbman_acquire_desc *p;
1725 struct qbman_acquire_rslt *r;
/* Hardware limit: 1..7 buffers per acquire command. */
1727 if (!num_buffers || (num_buffers > 7))
1730 /* Start the management command */
1731 p = qbman_swp_mc_start(s);
1736 /* Encode the caller-provided attributes */
1738 p->num = num_buffers;
1740 /* Complete the management command */
1741 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1743 pr_err("qbman: acquire from BPID %d failed, no response\n",
1748 /* Decode the outcome */
1749 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
1751 /* Determine success or failure */
1752 if (r->rslt != QBMAN_MC_RSLT_OK) {
1753 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
/* Hardware may return fewer buffers than requested, never more. */
1758 QBMAN_BUG_ON(r->num > num_buffers);
1760 /* Copy the acquired buffers to the caller's array */
1761 u64_from_le32_copy(buffers, &r->buf[0], r->num);
1769 struct qbman_alt_fq_state_desc {
1771 uint8_t reserved[3];
1773 uint8_t reserved2[56];
1776 struct qbman_alt_fq_state_rslt {
1779 uint8_t reserved[62];
1782 #define ALT_FQ_FQID_MASK 0x00FFFFFF
/* Issue one of the ALT-FQ management commands (schedule/force/XON/XOFF,
 * selected by 'alt_fq_verb') against frame queue 'fqid'.
 * NOTE(review): the no-response and success/failure returns fall outside
 * the lines shown here.
 */
1784 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
1785 uint8_t alt_fq_verb)
1787 struct qbman_alt_fq_state_desc *p;
1788 struct qbman_alt_fq_state_rslt *r;
1790 /* Start the management command */
1791 p = qbman_swp_mc_start(s)
/* FQIDs are 24-bit; mask off any stray upper bits. */;
1795 p->fqid = fqid & ALT_FQ_FQID_MASK;
1797 /* Complete the management command */
1798 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1800 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1805 /* Decode the outcome */
1806 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
1808 /* Determine success or failure */
1809 if (r->rslt != QBMAN_MC_RSLT_OK) {
1810 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
1811 fqid, alt_fq_verb, r->rslt);
/* Move a frame queue to the scheduled state. */
1818 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
1820 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
/* Force-eligible a frame queue. */
1823 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
1825 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
/* Set a frame queue to XON (resume transmission). */
1828 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
1830 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
/* Set a frame queue to XOFF (pause transmission). */
1833 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
1835 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
1838 /**********************/
1839 /* Channel management */
1840 /**********************/
1842 struct qbman_cdan_ctrl_desc {
1850 uint8_t reserved3[48];
1854 struct qbman_cdan_ctrl_rslt {
1858 uint8_t reserved[60];
1861 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
1862 * would be irresponsible to expose it.
1864 #define CODE_CDAN_WE_EN 0x1
1865 #define CODE_CDAN_WE_CTX 0x4
/* Configure channel data-availability notifications for 'channelid' via
 * the WQCHAN_CONFIGURE management command. 'we_mask' selects which fields
 * (enable and/or context) are written.
 * NOTE(review): attribute-encoding lines and the return statements fall
 * outside the lines shown here.
 */
1867 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
1868 uint8_t we_mask, uint8_t cdan_en,
1871 struct qbman_cdan_ctrl_desc *p;
1872 struct qbman_cdan_ctrl_rslt *r;
1874 /* Start the management command */
1875 p = qbman_swp_mc_start(s);
1879 /* Encode the caller-provided attributes */
1888 /* Complete the management command */
1889 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1891 pr_err("qbman: wqchan config failed, no response\n");
1895 /* Decode the outcome */
1896 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
1897 != QBMAN_WQCHAN_CONFIGURE);
1899 /* Determine success or failure */
1900 if (r->rslt != QBMAN_MC_RSLT_OK) {
1901 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
1902 channelid, r->rslt);
/* Set only the CDAN context for a channel (CTX write-enable). */
1909 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1912 return qbman_swp_CDAN_set(s, channelid,
/* Enable CDAN generation for a channel (EN write-enable only). */
1917 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
1919 return qbman_swp_CDAN_set(s, channelid,
/* Disable CDAN generation for a channel (EN write-enable only). */
1924 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
1926 return qbman_swp_CDAN_set(s, channelid,
/* Set the CDAN context and enable notifications in one command
 * (both EN and CTX write-enables set).
 */
1931 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1934 return qbman_swp_CDAN_set(s, channelid,
1935 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
/* Recover the DQRR ring index from a DQRR entry's address. */
1939 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
1941 return QBMAN_IDX_FROM_DQRR(dqrr);
1944 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
1946 struct qbman_result *dq;
1948 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));