1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
9 #include "qbman_portal.h"
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE 0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE 0x48
20 #define QBMAN_FQ_FORCE 0x49
21 #define QBMAN_FQ_XON 0x4d
22 #define QBMAN_FQ_XOFF 0x4e
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
28 #define QBMAN_RESPONSE_VERB_MASK 0x7f
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
33 #define QB_SDQCR_FC_SHIFT 29
34 #define QB_SDQCR_FC_MASK 0x1
35 #define QB_SDQCR_DCT_SHIFT 24
36 #define QB_SDQCR_DCT_MASK 0x3
37 #define QB_SDQCR_TOK_SHIFT 16
38 #define QB_SDQCR_TOK_MASK 0xff
39 #define QB_SDQCR_SRC_SHIFT 0
40 #define QB_SDQCR_SRC_MASK 0xffff
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN 0xbb
/* Dequeue-command-type values encoded into the SDQCR DCT field (see
 * QB_SDQCR_DCT_SHIFT above).
 */
45 enum qbman_sdqcr_dct {
46 qbman_sdqcr_dct_null = 0,
47 qbman_sdqcr_dct_prio_ics,
48 qbman_sdqcr_dct_active_ics,
49 qbman_sdqcr_dct_active
/* NOTE(review): this dump elides lines here -- the closing brace of the
 * enum above and the header of the frame-count enum below are missing.
 */
53 qbman_sdqcr_fc_one = 0,
54 qbman_sdqcr_fc_up_to_3 = 1
57 /* We need to keep track of which SWP triggered a pull command
58 * so keep an array of portal IDs and use the token field to
59 * be able to find the proper portal
61 #define MAX_QBMAN_PORTALS 64
/* Token-indexed lookup table: pull commands stamp the token with
 * (portal index + 1), letting response processing find the issuing portal.
 */
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
64 /* Internal Function declaration */
66 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
67 const struct qbman_eq_desc *d,
68 const struct qbman_fd *fd);
70 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
71 const struct qbman_eq_desc *d,
72 const struct qbman_fd *fd);
75 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
76 const struct qbman_eq_desc *d,
77 const struct qbman_fd *fd);
79 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
80 const struct qbman_eq_desc *d,
81 const struct qbman_fd *fd);
84 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
85 const struct qbman_eq_desc *d,
86 const struct qbman_fd *fd,
90 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
91 const struct qbman_eq_desc *d,
92 const struct qbman_fd *fd,
97 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
98 const struct qbman_eq_desc *d,
99 const struct qbman_fd *fd,
102 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
103 const struct qbman_eq_desc *d,
104 const struct qbman_fd *fd,
108 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
110 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
112 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
113 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
116 qbman_swp_release_direct(struct qbman_swp *s,
117 const struct qbman_release_desc *d,
118 const uint64_t *buffers, unsigned int num_buffers);
120 qbman_swp_release_mem_back(struct qbman_swp *s,
121 const struct qbman_release_desc *d,
122 const uint64_t *buffers, unsigned int num_buffers);
124 /* Function pointers */
125 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
126 const struct qbman_eq_desc *d,
127 const struct qbman_fd *fd)
128 = qbman_swp_enqueue_array_mode_direct;
130 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
131 const struct qbman_eq_desc *d,
132 const struct qbman_fd *fd)
133 = qbman_swp_enqueue_ring_mode_direct;
135 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
136 const struct qbman_eq_desc *d,
137 const struct qbman_fd *fd,
140 = qbman_swp_enqueue_multiple_direct;
142 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
143 const struct qbman_eq_desc *d,
144 const struct qbman_fd *fd,
146 = qbman_swp_enqueue_multiple_desc_direct;
148 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
149 struct qbman_pull_desc *d)
150 = qbman_swp_pull_direct;
152 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
153 = qbman_swp_dqrr_next_direct;
155 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
156 const struct qbman_release_desc *d,
157 const uint64_t *buffers, unsigned int num_buffers)
158 = qbman_swp_release_direct;
160 /*********************************/
161 /* Portal constructor/destructor */
162 /*********************************/
164 /* Software portals should always be in the power-on state when we initialise,
165 * due to the CCSR-based portal reset functionality that MC has.
167 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
168 * valid-bits, so we need to support a workaround where we don't trust
169 * valid-bits when detecting new entries until any stale ring entries have been
170 * overwritten at least once. The idea is that we read PI for the first few
171 * entries, then switch to valid-bit after that. The trick is to clear the
172 * bug-work-around boolean once the PI wraps around the ring for the first time.
174 * Note: this still carries a slight additional cost once the decrementer hits
/* Create and initialise a software portal from descriptor @d.
 * Selects "direct" vs "mem-back" access functions by QMan revision,
 * sizes the DQRR/EQCR rings, and registers the portal in
 * portal_idx_map[] for token-based lookup.
 * NOTE(review): several lines (malloc NULL-check, error returns, else
 * branches, closing braces) are elided in this dump -- verify against
 * the full file before relying on the annotated flow below.
 */
177 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
182 struct qbman_swp *p = malloc(sizeof(*p));
187 memset(p, 0, sizeof(struct qbman_swp));
190 #ifdef QBMAN_CHECKING
/* Management-command state machine starts in the "can start" state. */
191 p->mc.check = swp_mc_can_start;
193 p->mc.valid_bit = QB_VALID_BIT;
/* Pre-compute the SDQCR value; it is only written to HW once a channel
 * is actually enabled (see qbman_swp_push_set()).
 */
194 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
195 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
196 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
/* QMan >= 5000 uses the memory-backed MR response path. */
197 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
198 p->mr.valid_bit = QB_VALID_BIT;
/* vdq.busy == 1 means "no volatile dequeue outstanding" (free). */
200 atomic_set(&p->vdq.busy, 1);
201 p->vdq.valid_bit = QB_VALID_BIT;
202 p->dqrr.valid_bit = QB_VALID_BIT;
203 qman_version = p->desc.qman_version;
/* Pre-4100 QMan: 4-entry DQRR plus the valid-bit reset bug workaround
 * (see the comment block above this function).
 */
204 if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
205 p->dqrr.dqrr_size = 4;
206 p->dqrr.reset_bug = 1;
208 p->dqrr.dqrr_size = 8;
209 p->dqrr.reset_bug = 0;
212 ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
215 pr_err("qbman_swp_sys_init() failed %d\n", ret);
219 /* Verify that the DQRRPI is 0 - if it is not the portal isn't
220 * in default state which is an error
222 if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
223 pr_err("qbman DQRR PI is not zero, portal is not clean\n");
228 /* SDQCR needs to be initialized to 0 when no channels are
229 * being dequeued from or else the QMan HW will indicate an
230 * error. The values that were calculated above will be
231 * applied when dequeues from a specific channel are enabled.
233 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
235 p->eqcr.pi_ring_size = 8;
/* QMan >= 5000: larger EQCR and the mem-back variants of every
 * access function.
 */
236 if ((qman_version & 0xFFFF0000) >= QMAN_REV_5000) {
237 p->eqcr.pi_ring_size = 32;
238 qbman_swp_enqueue_array_mode_ptr =
239 qbman_swp_enqueue_array_mode_mem_back;
240 qbman_swp_enqueue_ring_mode_ptr =
241 qbman_swp_enqueue_ring_mode_mem_back;
242 qbman_swp_enqueue_multiple_ptr =
243 qbman_swp_enqueue_multiple_mem_back;
244 qbman_swp_enqueue_multiple_desc_ptr =
245 qbman_swp_enqueue_multiple_desc_mem_back;
246 qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
247 qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
248 qbman_swp_release_ptr = qbman_swp_release_mem_back;
/* Build an all-ones mask as wide as the ring size (ring size is a
 * power of two).
 */
251 for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
252 p->eqcr.pi_mask = (p->eqcr.pi_mask<<1) + 1;
253 eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
254 p->eqcr.pi = eqcr_pi & p->eqcr.pi_mask;
255 p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
256 if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
257 p->eqcr.ci = qbman_cinh_read(&p->sys,
258 QBMAN_CINH_SWP_EQCR_CI) & p->eqcr.pi_mask;
/* NOTE(review): >= 5000 path reads EQCR_PI (not CI) as the initial
 * consumer index -- presumably intentional on mem-back portals; confirm
 * against the hardware reference.
 */
260 p->eqcr.ci = qbman_cinh_read(&p->sys,
261 QBMAN_CINH_SWP_EQCR_PI) & p->eqcr.pi_mask;
262 p->eqcr.available = p->eqcr.pi_ring_size -
263 qm_cyc_diff(p->eqcr.pi_ring_size,
264 p->eqcr.ci & (p->eqcr.pi_mask<<1),
265 p->eqcr.pi & (p->eqcr.pi_mask<<1));
/* Register for token-based lookup by pull-response processing. */
267 portal_idx_map[p->desc.idx] = p;
/* Tear down a software portal created by qbman_swp_init(): release the
 * sys-layer resources and remove the portal from the token lookup table.
 * (Presumably the portal struct itself is freed on a line elided from
 * this dump -- confirm against the full file.)
 */
271 void qbman_swp_finish(struct qbman_swp *p)
273 #ifdef QBMAN_CHECKING
/* No management command may be in flight at teardown time. */
274 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
276 qbman_swp_sys_finish(&p->sys);
277 portal_idx_map[p->desc.idx] = NULL;
/* Accessor for the descriptor this portal was initialised with
 * (body elided in this dump).
 */
281 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
/* Thin get/set wrappers over the portal's cache-inhibited interrupt
 * registers. Each pair below reads or writes one register via the
 * qbman_cinh_* accessors.
 */
/* ISDR: interrupt status disable ("vanish") register. */
290 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
292 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
295 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
297 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
/* ISR: interrupt status register; write-to-clear via the setter. */
300 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
302 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
305 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
307 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
/* DQRR_ITR: DQRR interrupt threshold. */
310 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
312 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
315 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
317 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
/* ITPR: interrupt timeout period. */
320 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
322 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
325 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
327 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
/* IER: interrupt enable (trigger) register. */
330 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
332 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
335 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
337 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
/* IIR: interrupt inhibit register; non-zero inhibit writes all-ones. */
340 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
342 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
345 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
347 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
348 inhibit ? 0xffffffff : 0);
351 /***********************/
352 /* Management commands */
353 /***********************/
356 * Internal code common to all types of management commands.
/* Begin a management command: return a pointer to the command buffer
 * the caller fills in before qbman_swp_mc_submit(). Pre-5000 QMan uses
 * the cache-enabled CR region; 5000+ uses the memory-backed CR_MEM.
 * Advances the debug state machine can_start -> can_submit.
 */
359 void *qbman_swp_mc_start(struct qbman_swp *p)
362 #ifdef QBMAN_CHECKING
363 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
365 if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
366 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
368 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
369 #ifdef QBMAN_CHECKING
/* NOTE(review): the ret NULL-guard around this transition is on an
 * elided line -- confirm against the full file.
 */
371 p->mc.check = swp_mc_can_submit;
376 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
379 #ifdef QBMAN_CHECKING
380 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
382 /* TBD: "|=" is going to hurt performance. Need to move as many fields
383 * out of word zero, and for those that remain, the "OR" needs to occur
384 * at the caller side. This debug check helps to catch cases where the
385 * caller wants to OR but has forgotten to do so.
387 QBMAN_BUG_ON((*v & cmd_verb) != *v);
388 if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
390 *v = cmd_verb | p->mc.valid_bit;
391 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
394 *v = cmd_verb | p->mr.valid_bit;
395 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
397 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
399 #ifdef QBMAN_CHECKING
400 p->mc.check = swp_mc_can_poll;
/* Poll for the response to a previously submitted management command.
 * Pre-5000: the response slot alternates with mc.valid_bit and the
 * command has completed iff the verb (minus valid-bit) is non-zero.
 * 5000+: the response is in RR_MEM and completion is detected by the
 * valid-bit toggling relative to mr.valid_bit. On completion the debug
 * state machine returns to can_start. (Early-return paths for
 * "not ready" are on lines elided from this dump.)
 */
404 void *qbman_swp_mc_result(struct qbman_swp *p)
407 #ifdef QBMAN_CHECKING
408 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
410 if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
411 qbman_cena_invalidate_prefetch(&p->sys,
412 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
413 ret = qbman_cena_read(&p->sys,
414 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
415 /* Remove the valid-bit -
416 * command completed iff the rest is non-zero
418 verb = ret[0] & ~QB_VALID_BIT;
/* Flip the expected valid-bit for the next command. */
421 p->mc.valid_bit ^= QB_VALID_BIT;
423 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
424 /* Command completed if the valid bit is toggled */
425 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
427 /* Remove the valid-bit -
428 * command completed iff the rest is non-zero
430 verb = ret[0] & ~QB_VALID_BIT;
433 p->mr.valid_bit ^= QB_VALID_BIT;
435 #ifdef QBMAN_CHECKING
436 p->mc.check = swp_mc_can_start;
/* Bit positions within the enqueue-command verb/seqnum/dca fields. */
445 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
446 enum qb_enqueue_commands {
448 enqueue_response_always = 1,
449 enqueue_rejects_to_fq = 2
452 #define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
453 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
454 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
455 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
456 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
457 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
458 #define QB_ENQUEUE_CMD_NLIS_SHIFT 14
459 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
/* Reset an enqueue descriptor to all-zero default state. */
461 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
463 memset(d, 0, sizeof(*d));
/* Configure plain (non-order-restored) enqueue; response policy depends
 * on respond_success (branching lines are elided in this dump).
 */
466 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
468 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
470 d->eq.verb |= enqueue_response_always;
472 d->eq.verb |= enqueue_rejects_to_fq;
/* Configure order-restoration enqueue with ORP id and sequence number;
 * 'incomplete' drives the NLIS (not-last-in-sequence) bit.
 */
475 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
476 uint16_t opr_id, uint16_t seqnum, int incomplete)
478 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
480 d->eq.verb |= enqueue_response_always;
482 d->eq.verb |= enqueue_rejects_to_fq;
484 d->eq.orpid = opr_id;
485 d->eq.seqnum = seqnum;
487 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
489 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
/* ORP "hole" notification: clears both NLIS and NESN for the seqnum. */
492 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
495 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
496 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
497 d->eq.orpid = opr_id;
498 d->eq.seqnum = seqnum;
499 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
500 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
/* ORP NESN advance: like the hole form but sets the IS_NESN bit. */
503 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
506 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
507 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
508 d->eq.orpid = opr_id;
509 d->eq.seqnum = seqnum;
510 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
511 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
/* Set the DMA address where the enqueue response is written. */
514 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
515 dma_addr_t storage_phys,
518 d->eq.rsp_addr = storage_phys;
/* Set the token echoed back in the enqueue response (body elided). */
522 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
/* Target a frame queue directly (clears the QD target-type bit). */
527 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
529 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
/* Target a queuing destination with channel bin and priority. */
533 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
534 uint16_t qd_bin, uint8_t qd_prio)
536 d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
538 d->eq.qdbin = qd_bin;
539 d->eq.qpri = qd_prio;
/* Enable/disable interrupt-on-dispatch for this enqueue. */
542 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
545 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
547 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
/* Configure DQRR consumption-acknowledge (DCA): which DQRR entry to
 * consume on dispatch and whether to park the frame queue.
 */
550 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
551 uint8_t dqrr_idx, int park)
554 d->eq.dca = dqrr_idx;
556 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
558 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
559 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
561 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
565 #define EQAR_IDX(eqar) ((eqar) & 0x1f)
566 #define EQAR_VB(eqar) ((eqar) & 0x80)
567 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
569 static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
573 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
576 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
582 static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
583 const struct qbman_eq_desc *d,
584 const struct qbman_fd *fd)
587 const uint32_t *cl = qb_cl(d);
588 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
590 pr_debug("EQAR=%08x\n", eqar);
591 if (!EQAR_SUCCESS(eqar))
593 p = qbman_cena_write_start_wo_shadow(&s->sys,
594 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
595 memcpy(&p[1], &cl[1], 28);
596 memcpy(&p[8], fd, sizeof(*fd));
598 /* Set the verb byte, have to substitute in the valid-bit */
600 p[0] = cl[0] | EQAR_VB(eqar);
601 qbman_cena_write_complete_wo_shadow(&s->sys,
602 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
605 static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
606 const struct qbman_eq_desc *d,
607 const struct qbman_fd *fd)
610 const uint32_t *cl = qb_cl(d);
611 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
613 pr_debug("EQAR=%08x\n", eqar);
614 if (!EQAR_SUCCESS(eqar))
616 p = qbman_cena_write_start_wo_shadow(&s->sys,
617 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
618 memcpy(&p[1], &cl[1], 28);
619 memcpy(&p[8], fd, sizeof(*fd));
621 /* Set the verb byte, have to substitute in the valid-bit */
622 p[0] = cl[0] | EQAR_VB(eqar);
624 qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
628 static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
629 const struct qbman_eq_desc *d,
630 const struct qbman_fd *fd)
632 return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
635 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
636 const struct qbman_eq_desc *d,
637 const struct qbman_fd *fd)
640 const uint32_t *cl = qb_cl(d);
641 uint32_t eqcr_ci, full_mask, half_mask;
643 half_mask = (s->eqcr.pi_mask>>1);
644 full_mask = s->eqcr.pi_mask;
645 if (!s->eqcr.available) {
646 eqcr_ci = s->eqcr.ci;
647 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
648 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
649 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
650 eqcr_ci, s->eqcr.ci);
651 if (!s->eqcr.available)
655 p = qbman_cena_write_start_wo_shadow(&s->sys,
656 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
657 memcpy(&p[1], &cl[1], 28);
658 memcpy(&p[8], fd, sizeof(*fd));
661 /* Set the verb byte, have to substitute in the valid-bit */
662 p[0] = cl[0] | s->eqcr.pi_vb;
663 qbman_cena_write_complete_wo_shadow(&s->sys,
664 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
666 s->eqcr.pi &= full_mask;
668 if (!(s->eqcr.pi & half_mask))
669 s->eqcr.pi_vb ^= QB_VALID_BIT;
674 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
675 const struct qbman_eq_desc *d,
676 const struct qbman_fd *fd)
679 const uint32_t *cl = qb_cl(d);
680 uint32_t eqcr_ci, full_mask, half_mask;
682 half_mask = (s->eqcr.pi_mask>>1);
683 full_mask = s->eqcr.pi_mask;
684 if (!s->eqcr.available) {
685 eqcr_ci = s->eqcr.ci;
686 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
687 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
688 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
689 eqcr_ci, s->eqcr.ci);
690 if (!s->eqcr.available)
694 p = qbman_cena_write_start_wo_shadow(&s->sys,
695 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
696 memcpy(&p[1], &cl[1], 28);
697 memcpy(&p[8], fd, sizeof(*fd));
699 /* Set the verb byte, have to substitute in the valid-bit */
700 p[0] = cl[0] | s->eqcr.pi_vb;
702 s->eqcr.pi &= full_mask;
704 if (!(s->eqcr.pi & half_mask))
705 s->eqcr.pi_vb ^= QB_VALID_BIT;
707 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
708 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
712 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
713 const struct qbman_eq_desc *d,
714 const struct qbman_fd *fd)
716 return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
/* Public single-frame enqueue: dispatch to array-mode or ring-mode
 * depending on how the portal's EQCR was configured at sys init.
 */
719 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
720 const struct qbman_fd *fd)
722 if (s->sys.eqcr_mode == qman_eqcr_vb_array)
723 return qbman_swp_enqueue_array_mode(s, d, fd);
724 else /* Use ring mode by default */
725 return qbman_swp_enqueue_ring_mode(s, d, fd);
728 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
729 const struct qbman_eq_desc *d,
730 const struct qbman_fd *fd,
735 const uint32_t *cl = qb_cl(d);
736 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
737 int i, num_enqueued = 0;
740 half_mask = (s->eqcr.pi_mask>>1);
741 full_mask = s->eqcr.pi_mask;
742 if (!s->eqcr.available) {
743 eqcr_ci = s->eqcr.ci;
744 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
745 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
746 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
747 eqcr_ci, s->eqcr.ci);
748 if (!s->eqcr.available)
752 eqcr_pi = s->eqcr.pi;
753 num_enqueued = (s->eqcr.available < num_frames) ?
754 s->eqcr.available : num_frames;
755 s->eqcr.available -= num_enqueued;
756 /* Fill in the EQCR ring */
757 for (i = 0; i < num_enqueued; i++) {
758 p = qbman_cena_write_start_wo_shadow(&s->sys,
759 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
760 memcpy(&p[1], &cl[1], 28);
761 memcpy(&p[8], &fd[i], sizeof(*fd));
767 /* Set the verb byte, have to substitute in the valid-bit */
768 eqcr_pi = s->eqcr.pi;
769 for (i = 0; i < num_enqueued; i++) {
770 p = qbman_cena_write_start_wo_shadow(&s->sys,
771 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
772 p[0] = cl[0] | s->eqcr.pi_vb;
773 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
774 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
776 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
777 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
780 if (!(eqcr_pi & half_mask))
781 s->eqcr.pi_vb ^= QB_VALID_BIT;
784 /* Flush all the cacheline without load/store in between */
785 eqcr_pi = s->eqcr.pi;
786 addr_cena = (size_t)s->sys.addr_cena;
787 for (i = 0; i < num_enqueued; i++) {
788 dcbf((uintptr_t)(addr_cena +
789 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
792 s->eqcr.pi = eqcr_pi & full_mask;
797 static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
798 const struct qbman_eq_desc *d,
799 const struct qbman_fd *fd,
804 const uint32_t *cl = qb_cl(d);
805 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
806 int i, num_enqueued = 0;
808 half_mask = (s->eqcr.pi_mask>>1);
809 full_mask = s->eqcr.pi_mask;
810 if (!s->eqcr.available) {
811 eqcr_ci = s->eqcr.ci;
812 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
813 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
814 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
815 eqcr_ci, s->eqcr.ci);
816 if (!s->eqcr.available)
820 eqcr_pi = s->eqcr.pi;
821 num_enqueued = (s->eqcr.available < num_frames) ?
822 s->eqcr.available : num_frames;
823 s->eqcr.available -= num_enqueued;
824 /* Fill in the EQCR ring */
825 for (i = 0; i < num_enqueued; i++) {
826 p = qbman_cena_write_start_wo_shadow(&s->sys,
827 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
828 memcpy(&p[1], &cl[1], 28);
829 memcpy(&p[8], &fd[i], sizeof(*fd));
833 /* Set the verb byte, have to substitute in the valid-bit */
834 eqcr_pi = s->eqcr.pi;
835 for (i = 0; i < num_enqueued; i++) {
836 p = qbman_cena_write_start_wo_shadow(&s->sys,
837 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
838 p[0] = cl[0] | s->eqcr.pi_vb;
839 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
840 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
842 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
843 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
846 if (!(eqcr_pi & half_mask))
847 s->eqcr.pi_vb ^= QB_VALID_BIT;
849 s->eqcr.pi = eqcr_pi & full_mask;
852 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
853 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
857 inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
858 const struct qbman_eq_desc *d,
859 const struct qbman_fd *fd,
863 return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
866 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
867 const struct qbman_eq_desc *d,
868 const struct qbman_fd *fd,
873 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
874 int i, num_enqueued = 0;
877 half_mask = (s->eqcr.pi_mask>>1);
878 full_mask = s->eqcr.pi_mask;
879 if (!s->eqcr.available) {
880 eqcr_ci = s->eqcr.ci;
881 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
882 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
883 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
884 eqcr_ci, s->eqcr.ci);
885 if (!s->eqcr.available)
889 eqcr_pi = s->eqcr.pi;
890 num_enqueued = (s->eqcr.available < num_frames) ?
891 s->eqcr.available : num_frames;
892 s->eqcr.available -= num_enqueued;
893 /* Fill in the EQCR ring */
894 for (i = 0; i < num_enqueued; i++) {
895 p = qbman_cena_write_start_wo_shadow(&s->sys,
896 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
898 memcpy(&p[1], &cl[1], 28);
899 memcpy(&p[8], &fd[i], sizeof(*fd));
905 /* Set the verb byte, have to substitute in the valid-bit */
906 eqcr_pi = s->eqcr.pi;
907 for (i = 0; i < num_enqueued; i++) {
908 p = qbman_cena_write_start_wo_shadow(&s->sys,
909 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
911 p[0] = cl[0] | s->eqcr.pi_vb;
913 if (!(eqcr_pi & half_mask))
914 s->eqcr.pi_vb ^= QB_VALID_BIT;
917 /* Flush all the cacheline without load/store in between */
918 eqcr_pi = s->eqcr.pi;
919 addr_cena = (size_t)s->sys.addr_cena;
920 for (i = 0; i < num_enqueued; i++) {
921 dcbf((uintptr_t)(addr_cena +
922 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
925 s->eqcr.pi = eqcr_pi & full_mask;
930 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
931 const struct qbman_eq_desc *d,
932 const struct qbman_fd *fd,
937 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
938 int i, num_enqueued = 0;
940 half_mask = (s->eqcr.pi_mask>>1);
941 full_mask = s->eqcr.pi_mask;
942 if (!s->eqcr.available) {
943 eqcr_ci = s->eqcr.ci;
944 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
945 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
946 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
947 eqcr_ci, s->eqcr.ci);
948 if (!s->eqcr.available)
952 eqcr_pi = s->eqcr.pi;
953 num_enqueued = (s->eqcr.available < num_frames) ?
954 s->eqcr.available : num_frames;
955 s->eqcr.available -= num_enqueued;
956 /* Fill in the EQCR ring */
957 for (i = 0; i < num_enqueued; i++) {
958 p = qbman_cena_write_start_wo_shadow(&s->sys,
959 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
961 memcpy(&p[1], &cl[1], 28);
962 memcpy(&p[8], &fd[i], sizeof(*fd));
966 /* Set the verb byte, have to substitute in the valid-bit */
967 eqcr_pi = s->eqcr.pi;
968 for (i = 0; i < num_enqueued; i++) {
969 p = qbman_cena_write_start_wo_shadow(&s->sys,
970 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
972 p[0] = cl[0] | s->eqcr.pi_vb;
974 if (!(eqcr_pi & half_mask))
975 s->eqcr.pi_vb ^= QB_VALID_BIT;
978 s->eqcr.pi = eqcr_pi & full_mask;
981 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
982 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
986 inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
987 const struct qbman_eq_desc *d,
988 const struct qbman_fd *fd,
991 return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
994 /*************************/
995 /* Static (push) dequeue */
996 /*************************/
998 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1000 uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1002 QBMAN_BUG_ON(channel_idx > 15);
1003 *enabled = src | (1 << channel_idx);
/* Enable or disable static (push) dequeue for @channel_idx, then write
 * the resulting SDQCR to hardware -- or 0 if no channels remain enabled,
 * since a non-zero SDQCR with an empty source map is a QMan error.
 */
1006 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1010 QBMAN_BUG_ON(channel_idx > 15);
1012 s->sdq |= 1 << channel_idx;
1014 s->sdq &= ~(1 << channel_idx);
1016 /* Recompute the complete src map. If no channels are enabled
1017 * the SDQCR must be 0 or else QMan will assert errors
1019 dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1021 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1023 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1026 /***************************/
1027 /* Volatile (pull) dequeue */
1028 /***************************/
1030 /* These should be const, eventually */
/* Bit positions within the volatile-dequeue (VDQCR) verb byte. */
1031 #define QB_VDQCR_VERB_DCT_SHIFT 0
1032 #define QB_VDQCR_VERB_DT_SHIFT 2
1033 #define QB_VDQCR_VERB_RLS_SHIFT 4
1034 #define QB_VDQCR_VERB_WAE_SHIFT 5
1035 #define QB_VDQCR_VERB_RAD_SHIFT 6
/* Dequeue-target type encoded into the DT field (enum header elided
 * in this dump).
 */
1039 qb_pull_dt_workqueue,
1040 qb_pull_dt_framequeue
/* Reset a pull descriptor to all-zero default state. */
1043 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1045 memset(d, 0, sizeof(*d));
/* Configure where dequeue results land: virtual address kept for the
 * driver, physical address handed to hardware; storage == NULL selects
 * DQRR delivery instead (RLS cleared). Branch lines are elided here.
 */
1048 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1049 struct qbman_result *storage,
1050 dma_addr_t storage_phys,
1053 d->pull.rsp_addr_virt = (size_t)storage;
1056 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1059 d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1061 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1063 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1065 d->pull.rsp_addr = storage_phys;
/* Number of frames to pull; hardware field is biased by one. */
1068 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1071 d->pull.numf = numframes - 1;
/* Token echoed back in the dequeue responses. */
1074 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1076 d->pull.tok = token;
/* Pull from a specific frame queue. */
1079 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1081 d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1082 d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1083 d->pull.dq_src = fqid;
/* Pull from a work queue with the given dequeue-command type. */
1086 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1087 enum qbman_pull_type_e dct)
1089 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1090 d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1091 d->pull.dq_src = wqid;
/* Pull from a channel with the given dequeue-command type. */
1094 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1095 enum qbman_pull_type_e dct)
1097 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1098 d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1099 d->pull.dq_src = chid;
/* RAD (reschedule-after-dequeue) is only meaningful with RLS set;
 * otherwise the request is rejected with a diagnostic.
 */
1102 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1104 if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1106 d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1108 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1110 printf("The RAD feature is not valid when RLS = 0\n");
/* Issue a volatile dequeue command (direct-access portals).
 * Only one VDQCR may be outstanding per portal: vdq.busy is taken with
 * an atomic dec-and-test and restored on contention (the EBUSY-style
 * early return is on a line elided from this dump). The token is
 * stamped with (portal index + 1) so response processing can locate the
 * issuing portal via portal_idx_map[].
 */
1114 static int qbman_swp_pull_direct(struct qbman_swp *s,
1115 struct qbman_pull_desc *d)
1118 uint32_t *cl = qb_cl(d);
1120 if (!atomic_dec_and_test(&s->vdq.busy)) {
1121 atomic_inc(&s->vdq.busy);
1125 d->pull.tok = s->sys.idx + 1;
1126 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1127 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1128 memcpy(&p[1], &cl[1], 12);
1130 /* Set the verb byte, have to substitute in the valid-bit */
1132 p[0] = cl[0] | s->vdq.valid_bit;
1133 s->vdq.valid_bit ^= QB_VALID_BIT;
1134 qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
/* Memory-backed variant: same flow, but writes VDQCR_MEM and rings the
 * VDQCR_RT doorbell instead of a cache-enabled completion.
 */
1139 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1140 struct qbman_pull_desc *d)
1143 uint32_t *cl = qb_cl(d);
1145 if (!atomic_dec_and_test(&s->vdq.busy)) {
1146 atomic_inc(&s->vdq.busy);
1150 d->pull.tok = s->sys.idx + 1;
1151 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1152 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1153 memcpy(&p[1], &cl[1], 12);
1155 /* Set the verb byte, have to substitute in the valid-bit */
1156 p[0] = cl[0] | s->vdq.valid_bit;
1157 s->vdq.valid_bit ^= QB_VALID_BIT;
1159 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
/* Public entry point: dispatch through the revision-selected impl. */
1164 inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1166 return qbman_swp_pull_ptr(s, d);
1173 #define QMAN_DQRR_PI_MASK 0xf
1175 #define QBMAN_RESULT_DQ 0x60
1176 #define QBMAN_RESULT_FQRN 0x21
1177 #define QBMAN_RESULT_FQRNI 0x22
1178 #define QBMAN_RESULT_FQPN 0x24
1179 #define QBMAN_RESULT_FQDAN 0x25
1180 #define QBMAN_RESULT_CDAN 0x26
1181 #define QBMAN_RESULT_CSCN_MEM 0x27
1182 #define QBMAN_RESULT_CGCU 0x28
1183 #define QBMAN_RESULT_BPSCN 0x29
1184 #define QBMAN_RESULT_CSCN_WQ 0x2a
1186 #include <rte_prefetch.h>
1188 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1190 const struct qbman_result *p;
1192 p = qbman_cena_read_wo_shadow(&s->sys,
1193 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1197 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
1198 * only once, so repeated calls can return a sequence of DQRR entries, without
1199 * requiring they be consumed immediately or in any particular order.
1201 inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
1203 return qbman_swp_dqrr_next_ptr(s);
1206 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1209 uint32_t response_verb;
1211 const struct qbman_result *p;
1213 /* Before using valid-bit to detect if something is there, we have to
1214 * handle the case of the DQRR reset bug...
1216 if (s->dqrr.reset_bug) {
1217 /* We pick up new entries by cache-inhibited producer index,
1218 * which means that a non-coherent mapping would require us to
1219 * invalidate and read *only* once that PI has indicated that
1220 * there's an entry here. The first trip around the DQRR ring
1221 * will be much less efficient than all subsequent trips around
1224 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
1227 /* there are new entries if pi != next_idx */
1228 if (pi == s->dqrr.next_idx)
1231 /* if next_idx is/was the last ring index, and 'pi' is
1232 * different, we can disable the workaround as all the ring
1233 * entries have now been DMA'd to so valid-bit checking is
1234 * repaired. Note: this logic needs to be based on next_idx
1235 * (which increments one at a time), rather than on pi (which
1236 * can burst and wrap-around between our snapshots of it).
1238 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
1239 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
1240 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
1241 s->dqrr.next_idx, pi);
1242 s->dqrr.reset_bug = 0;
1244 qbman_cena_invalidate_prefetch(&s->sys,
1245 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1247 p = qbman_cena_read_wo_shadow(&s->sys,
1248 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1252 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1253 * in the DQRR reset bug workaround, we shouldn't need to skip these
1254 * check, because we've already determined that a new entry is available
1255 * and we've invalidated the cacheline before reading it, so the
1256 * valid-bit behaviour is repaired and should tell us what we already
1257 * knew from reading PI.
1259 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1262 /* There's something there. Move "next_idx" attention to the next ring
1263 * entry (and prefetch it) before returning what we found.
1266 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1267 s->dqrr.next_idx = 0;
1268 s->dqrr.valid_bit ^= QB_VALID_BIT;
1270 /* If this is the final response to a volatile dequeue command
1271 * indicate that the vdq is no longer busy
1274 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1275 if ((response_verb == QBMAN_RESULT_DQ) &&
1276 (flags & QBMAN_DQ_STAT_VOLATILE) &&
1277 (flags & QBMAN_DQ_STAT_EXPIRED))
1278 atomic_inc(&s->vdq.busy);
1283 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1286 uint32_t response_verb;
1288 const struct qbman_result *p;
1290 p = qbman_cena_read_wo_shadow(&s->sys,
1291 QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1295 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1296 * in the DQRR reset bug workaround, we shouldn't need to skip these
1297 * check, because we've already determined that a new entry is available
1298 * and we've invalidated the cacheline before reading it, so the
1299 * valid-bit behaviour is repaired and should tell us what we already
1300 * knew from reading PI.
1302 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1305 /* There's something there. Move "next_idx" attention to the next ring
1306 * entry (and prefetch it) before returning what we found.
1309 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1310 s->dqrr.next_idx = 0;
1311 s->dqrr.valid_bit ^= QB_VALID_BIT;
1313 /* If this is the final response to a volatile dequeue command
1314 * indicate that the vdq is no longer busy
1317 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1318 if ((response_verb == QBMAN_RESULT_DQ) &&
1319 (flags & QBMAN_DQ_STAT_VOLATILE) &&
1320 (flags & QBMAN_DQ_STAT_EXPIRED))
1321 atomic_inc(&s->vdq.busy);
1325 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1326 void qbman_swp_dqrr_consume(struct qbman_swp *s,
1327 const struct qbman_result *dq)
1329 qbman_cinh_write(&s->sys,
1330 QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1333 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1334 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
1337 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
1340 /*********************************/
1341 /* Polling user-provided storage */
1342 /*********************************/
1344 int qbman_result_has_new_result(struct qbman_swp *s,
1345 struct qbman_result *dq)
1347 if (dq->dq.tok == 0)
1351 * Set token to be 0 so we will detect change back to 1
1352 * next time the looping is traversed. Const is cast away here
1353 * as we want users to treat the dequeue responses as read only.
1355 ((struct qbman_result *)dq)->dq.tok = 0;
1358 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1359 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1360 * that makes it available. Eg. we may be looking at our 10th dequeue
1361 * result, having released VDQCR after the 1st result and it is now
1362 * busy due to some other command!
1364 if (s->vdq.storage == dq) {
1365 s->vdq.storage = NULL;
1366 atomic_inc(&s->vdq.busy);
1372 int qbman_check_new_result(struct qbman_result *dq)
1374 if (dq->dq.tok == 0)
1378 * Set token to be 0 so we will detect change back to 1
1379 * next time the looping is traversed. Const is cast away here
1380 * as we want users to treat the dequeue responses as read only.
1382 ((struct qbman_result *)dq)->dq.tok = 0;
1387 int qbman_check_command_complete(struct qbman_result *dq)
1389 struct qbman_swp *s;
1391 if (dq->dq.tok == 0)
1394 s = portal_idx_map[dq->dq.tok - 1];
1396 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1397 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1398 * that makes it available. Eg. we may be looking at our 10th dequeue
1399 * result, having released VDQCR after the 1st result and it is now
1400 * busy due to some other command!
1402 if (s->vdq.storage == dq) {
1403 s->vdq.storage = NULL;
1404 atomic_inc(&s->vdq.busy);
1410 /********************************/
1411 /* Categorising qbman results */
1412 /********************************/
1414 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1417 uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1419 return (response_verb == x);
1422 int qbman_result_is_DQ(const struct qbman_result *dq)
1424 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1427 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1429 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1432 int qbman_result_is_CDAN(const struct qbman_result *dq)
1434 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
1437 int qbman_result_is_CSCN(const struct qbman_result *dq)
1439 return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1440 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1443 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1445 return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
1448 int qbman_result_is_CGCU(const struct qbman_result *dq)
1450 return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
1453 int qbman_result_is_FQRN(const struct qbman_result *dq)
1455 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1458 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1460 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1463 int qbman_result_is_FQPN(const struct qbman_result *dq)
1465 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1468 /*********************************/
1469 /* Parsing frame dequeue results */
1470 /*********************************/
1472 /* These APIs assume qbman_result_is_DQ() is TRUE */
1474 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1479 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1481 return dq->dq.seqnum;
1484 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1486 return dq->dq.oprid;
1489 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1494 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1496 return dq->dq.fq_byte_cnt;
1499 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1501 return dq->dq.fq_frm_cnt;
1504 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1506 return dq->dq.fqd_ctx;
1509 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1511 return (const struct qbman_fd *)&dq->dq.fd[0];
1514 /**************************************/
1515 /* Parsing state-change notifications */
1516 /**************************************/
1517 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1519 return scn->scn.state;
1522 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1524 return scn->scn.rid_tok;
1527 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1529 return scn->scn.ctx;
/* Extract the 14-bit buffer-pool ID from a BPSCN's resource-ID field. */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
}
/* BPSCN state bit 0 set means the pool is empty; report the inverse. */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state(scn) & 0x1);
}
/* Non-zero when BPSCN state bit 1 (depletion) is set. */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x2);
}
/* Non-zero when BPSCN state bit 2 (surplus) is set. */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x4);
}
/* Return the user context carried in a BPSCN (same field as generic SCN). */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
	return qbman_result_SCN_ctx(scn);
}
/* Extract the 16-bit congestion-group ID from a CGCU's resource-ID field. */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
	return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
}
/* Return the instantaneous count carried in a CGCU (in the SCN ctx field). */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
	return qbman_result_SCN_ctx(scn);
}
1573 /******************/
1574 /* Buffer release */
1575 /******************/
1576 #define QB_BR_RC_VALID_SHIFT 5
1577 #define QB_BR_RCDI_SHIFT 6
1579 void qbman_release_desc_clear(struct qbman_release_desc *d)
1581 memset(d, 0, sizeof(*d));
1582 d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
1585 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
1590 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1593 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
1595 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
1598 #define RAR_IDX(rar) ((rar) & 0x7)
1599 #define RAR_VB(rar) ((rar) & 0x80)
1600 #define RAR_SUCCESS(rar) ((rar) & 0x100)
1602 static int qbman_swp_release_direct(struct qbman_swp *s,
1603 const struct qbman_release_desc *d,
1604 const uint64_t *buffers,
1605 unsigned int num_buffers)
1608 const uint32_t *cl = qb_cl(d);
1609 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1611 pr_debug("RAR=%08x\n", rar);
1612 if (!RAR_SUCCESS(rar))
1615 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1617 /* Start the release command */
1618 p = qbman_cena_write_start_wo_shadow(&s->sys,
1619 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1621 /* Copy the caller's buffer pointers to the command */
1622 u64_to_le32_copy(&p[2], buffers, num_buffers);
1624 /* Set the verb byte, have to substitute in the valid-bit and the
1625 * number of buffers.
1628 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1629 qbman_cena_write_complete_wo_shadow(&s->sys,
1630 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1635 static int qbman_swp_release_mem_back(struct qbman_swp *s,
1636 const struct qbman_release_desc *d,
1637 const uint64_t *buffers,
1638 unsigned int num_buffers)
1641 const uint32_t *cl = qb_cl(d);
1642 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1644 pr_debug("RAR=%08x\n", rar);
1645 if (!RAR_SUCCESS(rar))
1648 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1650 /* Start the release command */
1651 p = qbman_cena_write_start_wo_shadow(&s->sys,
1652 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1654 /* Copy the caller's buffer pointers to the command */
1655 u64_to_le32_copy(&p[2], buffers, num_buffers);
1657 /* Set the verb byte, have to substitute in the valid-bit and the
1658 * number of buffers.
1660 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1662 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
1663 RAR_IDX(rar) * 4, QMAN_RT_MODE);
/* Public entry point for buffer release: dispatch through the per-portal
 * implementation pointer (direct vs memory-backed variants).
 */
inline int qbman_swp_release(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const uint64_t *buffers,
			     unsigned int num_buffers)
{
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
1676 /*******************/
1677 /* Buffer acquires */
1678 /*******************/
/* Management-command layout for buffer acquire (64 bytes total).
 * NOTE(review): interior fields were lost in extraction; reconstructed so
 * that the surviving reserved2[59] pad completes a 64-byte command. Verify
 * field offsets against the QBMan MC command format before relying on this.
 */
struct qbman_acquire_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t bpid;
	uint8_t num;
	uint8_t reserved2[59];
};
/* Management-command response layout for buffer acquire.
 * NOTE(review): interior fields were lost in extraction; reconstructed so
 * that the surviving reserved2[3] pad aligns buf[] to 8 bytes. Verify
 * offsets against the QBMan MC response format before relying on this.
 */
struct qbman_acquire_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t reserved;
	uint8_t num;
	uint8_t reserved2[3];
	uint64_t buf[7];
};
1696 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1697 unsigned int num_buffers)
1699 struct qbman_acquire_desc *p;
1700 struct qbman_acquire_rslt *r;
1702 if (!num_buffers || (num_buffers > 7))
1705 /* Start the management command */
1706 p = qbman_swp_mc_start(s);
1711 /* Encode the caller-provided attributes */
1713 p->num = num_buffers;
1715 /* Complete the management command */
1716 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1718 pr_err("qbman: acquire from BPID %d failed, no response\n",
1723 /* Decode the outcome */
1724 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
1726 /* Determine success or failure */
1727 if (r->rslt != QBMAN_MC_RSLT_OK) {
1728 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
1733 QBMAN_BUG_ON(r->num > num_buffers);
1735 /* Copy the acquired buffers to the caller's array */
1736 u64_from_le32_copy(buffers, &r->buf[0], r->num);
/* Management-command layout for the ALT-FQ-state family (schedule/force/
 * XON/XOFF). NOTE(review): the fqid field was lost in extraction;
 * reconstructed between the surviving reserved[3] and reserved2[56] pads.
 * Verify offsets against the QBMan MC command format.
 */
struct qbman_alt_fq_state_desc {
	uint8_t verb;
	uint8_t reserved[3];
	uint32_t fqid;
	uint8_t reserved2[56];
};
/* Management-command response layout for the ALT-FQ-state family.
 * NOTE(review): the verb/rslt header bytes were lost in extraction;
 * reconstructed ahead of the surviving reserved[62] pad (64 bytes total).
 */
struct qbman_alt_fq_state_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint8_t reserved[62];
};
1757 #define ALT_FQ_FQID_MASK 0x00FFFFFF
1759 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
1760 uint8_t alt_fq_verb)
1762 struct qbman_alt_fq_state_desc *p;
1763 struct qbman_alt_fq_state_rslt *r;
1765 /* Start the management command */
1766 p = qbman_swp_mc_start(s);
1770 p->fqid = fqid & ALT_FQ_FQID_MASK;
1772 /* Complete the management command */
1773 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1775 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1780 /* Decode the outcome */
1781 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
1783 /* Determine success or failure */
1784 if (r->rslt != QBMAN_MC_RSLT_OK) {
1785 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
1786 fqid, alt_fq_verb, r->rslt);
1793 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
1795 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
1798 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
1800 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
1803 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
1805 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
1808 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
1810 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
1813 /**********************/
1814 /* Channel management */
1815 /**********************/
/* Management-command layout for WQ-channel (CDAN) configuration.
 * NOTE(review): interior fields were lost in extraction; reconstructed so
 * that the surviving reserved3[48] pad completes a 64-byte command and the
 * fields match their uses in qbman_swp_CDAN_set(). Verify offsets against
 * the QBMan MC command format.
 */
struct qbman_cdan_ctrl_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t ch;
	uint8_t we;
	uint8_t ctrl;
	uint16_t reserved2;
	uint64_t cdan_ctx;
	uint8_t reserved3[48];
};
/* Management-command response layout for WQ-channel configuration.
 * NOTE(review): header fields were lost in extraction; reconstructed ahead
 * of the surviving reserved[60] pad (64 bytes total).
 */
struct qbman_cdan_ctrl_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t ch;
	uint8_t reserved[60];
};
1836 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
1837 * would be irresponsible to expose it.
1839 #define CODE_CDAN_WE_EN 0x1
1840 #define CODE_CDAN_WE_CTX 0x4
1842 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
1843 uint8_t we_mask, uint8_t cdan_en,
1846 struct qbman_cdan_ctrl_desc *p;
1847 struct qbman_cdan_ctrl_rslt *r;
1849 /* Start the management command */
1850 p = qbman_swp_mc_start(s);
1854 /* Encode the caller-provided attributes */
1863 /* Complete the management command */
1864 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1866 pr_err("qbman: wqchan config failed, no response\n");
1870 /* Decode the outcome */
1871 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
1872 != QBMAN_WQCHAN_CONFIGURE);
1874 /* Determine success or failure */
1875 if (r->rslt != QBMAN_MC_RSLT_OK) {
1876 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
1877 channelid, r->rslt);
1884 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1887 return qbman_swp_CDAN_set(s, channelid,
1892 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
1894 return qbman_swp_CDAN_set(s, channelid,
1899 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
1901 return qbman_swp_CDAN_set(s, channelid,
1906 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1909 return qbman_swp_CDAN_set(s, channelid,
1910 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
/* Recover a DQRR entry's ring index from its portal-mapped address. */
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}
1919 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
1921 struct qbman_result *dq;
1923 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));