/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 */
9 #include "qbman_portal.h"
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE 0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE 0x48
20 #define QBMAN_FQ_FORCE 0x49
21 #define QBMAN_FQ_XON 0x4d
22 #define QBMAN_FQ_XOFF 0x4e
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
28 #define QBMAN_RESPONSE_VERB_MASK 0x7f
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
33 #define QB_SDQCR_FC_SHIFT 29
34 #define QB_SDQCR_FC_MASK 0x1
35 #define QB_SDQCR_DCT_SHIFT 24
36 #define QB_SDQCR_DCT_MASK 0x3
37 #define QB_SDQCR_TOK_SHIFT 16
38 #define QB_SDQCR_TOK_MASK 0xff
39 #define QB_SDQCR_SRC_SHIFT 0
40 #define QB_SDQCR_SRC_MASK 0xffff
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN 0xbb
/* Dequeue command types for the SDQCR DCT field. */
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

/* Frame count limit per static dequeue command. */
enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
57 /* We need to keep track of which SWP triggered a pull command
58 * so keep an array of portal IDs and use the token field to
59 * be able to find the proper portal
61 #define MAX_QBMAN_PORTALS 64
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
64 /* Internal Function declaration */
66 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
67 const struct qbman_eq_desc *d,
68 const struct qbman_fd *fd);
70 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
71 const struct qbman_eq_desc *d,
72 const struct qbman_fd *fd);
75 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
76 const struct qbman_eq_desc *d,
77 const struct qbman_fd *fd);
79 qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
80 const struct qbman_eq_desc *d,
81 const struct qbman_fd *fd);
83 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
84 const struct qbman_eq_desc *d,
85 const struct qbman_fd *fd);
88 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
89 const struct qbman_eq_desc *d,
90 const struct qbman_fd *fd,
94 qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
95 const struct qbman_eq_desc *d,
96 const struct qbman_fd *fd,
100 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
101 const struct qbman_eq_desc *d,
102 const struct qbman_fd *fd,
107 qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
108 const struct qbman_eq_desc *d,
109 struct qbman_fd **fd,
113 qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
114 const struct qbman_eq_desc *d,
115 struct qbman_fd **fd,
119 qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
120 const struct qbman_eq_desc *d,
121 struct qbman_fd **fd,
126 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
127 const struct qbman_eq_desc *d,
128 const struct qbman_fd *fd,
131 qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
132 const struct qbman_eq_desc *d,
133 const struct qbman_fd *fd,
136 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
137 const struct qbman_eq_desc *d,
138 const struct qbman_fd *fd,
142 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
144 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
146 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
147 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
150 qbman_swp_release_direct(struct qbman_swp *s,
151 const struct qbman_release_desc *d,
152 const uint64_t *buffers, unsigned int num_buffers);
154 qbman_swp_release_mem_back(struct qbman_swp *s,
155 const struct qbman_release_desc *d,
156 const uint64_t *buffers, unsigned int num_buffers);
158 /* Function pointers */
159 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
160 const struct qbman_eq_desc *d,
161 const struct qbman_fd *fd)
162 = qbman_swp_enqueue_array_mode_direct;
164 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
165 const struct qbman_eq_desc *d,
166 const struct qbman_fd *fd)
167 = qbman_swp_enqueue_ring_mode_direct;
169 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
170 const struct qbman_eq_desc *d,
171 const struct qbman_fd *fd,
174 = qbman_swp_enqueue_multiple_direct;
176 static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
177 const struct qbman_eq_desc *d,
178 struct qbman_fd **fd,
181 = qbman_swp_enqueue_multiple_fd_direct;
183 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
184 const struct qbman_eq_desc *d,
185 const struct qbman_fd *fd,
187 = qbman_swp_enqueue_multiple_desc_direct;
189 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
190 struct qbman_pull_desc *d)
191 = qbman_swp_pull_direct;
193 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
194 = qbman_swp_dqrr_next_direct;
196 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
197 const struct qbman_release_desc *d,
198 const uint64_t *buffers, unsigned int num_buffers)
199 = qbman_swp_release_direct;
201 /*********************************/
202 /* Portal constructor/destructor */
203 /*********************************/
/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-work-around boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
218 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
223 struct qbman_swp *p = malloc(sizeof(*p));
228 memset(p, 0, sizeof(struct qbman_swp));
231 #ifdef QBMAN_CHECKING
232 p->mc.check = swp_mc_can_start;
234 p->mc.valid_bit = QB_VALID_BIT;
235 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
236 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
237 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
238 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
239 && (d->cena_access_mode == qman_cena_fastest_access))
240 p->mr.valid_bit = QB_VALID_BIT;
242 atomic_set(&p->vdq.busy, 1);
243 p->vdq.valid_bit = QB_VALID_BIT;
244 p->dqrr.valid_bit = QB_VALID_BIT;
245 qman_version = p->desc.qman_version;
246 if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
247 p->dqrr.dqrr_size = 4;
248 p->dqrr.reset_bug = 1;
250 p->dqrr.dqrr_size = 8;
251 p->dqrr.reset_bug = 0;
254 ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
257 pr_err("qbman_swp_sys_init() failed %d\n", ret);
261 /* Verify that the DQRRPI is 0 - if it is not the portal isn't
262 * in default state which is an error
264 if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
265 pr_err("qbman DQRR PI is not zero, portal is not clean\n");
270 /* SDQCR needs to be initialized to 0 when no channels are
271 * being dequeued from or else the QMan HW will indicate an
272 * error. The values that were calculated above will be
273 * applied when dequeues from a specific channel are enabled.
275 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
277 p->eqcr.pi_ring_size = 8;
278 if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
279 && (d->cena_access_mode == qman_cena_fastest_access)) {
280 p->eqcr.pi_ring_size = 32;
281 qbman_swp_enqueue_array_mode_ptr =
282 qbman_swp_enqueue_array_mode_mem_back;
283 qbman_swp_enqueue_ring_mode_ptr =
284 qbman_swp_enqueue_ring_mode_mem_back;
285 qbman_swp_enqueue_multiple_ptr =
286 qbman_swp_enqueue_multiple_mem_back;
287 qbman_swp_enqueue_multiple_fd_ptr =
288 qbman_swp_enqueue_multiple_fd_mem_back;
289 qbman_swp_enqueue_multiple_desc_ptr =
290 qbman_swp_enqueue_multiple_desc_mem_back;
291 qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
292 qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
293 qbman_swp_release_ptr = qbman_swp_release_mem_back;
296 if (dpaa2_svr_family == SVR_LS1080A) {
297 qbman_swp_enqueue_ring_mode_ptr =
298 qbman_swp_enqueue_ring_mode_cinh_direct;
299 qbman_swp_enqueue_multiple_ptr =
300 qbman_swp_enqueue_multiple_cinh_direct;
301 qbman_swp_enqueue_multiple_fd_ptr =
302 qbman_swp_enqueue_multiple_fd_cinh_direct;
303 qbman_swp_enqueue_multiple_desc_ptr =
304 qbman_swp_enqueue_multiple_desc_cinh_direct;
307 for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
308 p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
309 eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
310 p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
311 p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
312 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
313 && (d->cena_access_mode == qman_cena_fastest_access))
314 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
315 & p->eqcr.pi_ci_mask;
317 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
318 & p->eqcr.pi_ci_mask;
319 p->eqcr.available = p->eqcr.pi_ring_size -
320 qm_cyc_diff(p->eqcr.pi_ring_size,
321 p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
322 p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));
324 portal_idx_map[p->desc.idx] = p;
328 void qbman_swp_finish(struct qbman_swp *p)
330 #ifdef QBMAN_CHECKING
331 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
333 qbman_swp_sys_finish(&p->sys);
334 portal_idx_map[p->desc.idx] = NULL;
338 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
347 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
349 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
352 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
354 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
357 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
359 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
362 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
364 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
367 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
369 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
372 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
374 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
377 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
379 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
382 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
384 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
387 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
389 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
392 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
394 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
397 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
399 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
402 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
404 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
405 inhibit ? 0xffffffff : 0);
408 /***********************/
409 /* Management commands */
410 /***********************/
413 * Internal code common to all types of management commands.
416 void *qbman_swp_mc_start(struct qbman_swp *p)
419 #ifdef QBMAN_CHECKING
420 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
422 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
423 && (p->desc.cena_access_mode == qman_cena_fastest_access))
424 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
426 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
427 #ifdef QBMAN_CHECKING
429 p->mc.check = swp_mc_can_submit;
434 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
437 #ifdef QBMAN_CHECKING
438 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
440 /* TBD: "|=" is going to hurt performance. Need to move as many fields
441 * out of word zero, and for those that remain, the "OR" needs to occur
442 * at the caller side. This debug check helps to catch cases where the
443 * caller wants to OR but has forgotten to do so.
445 QBMAN_BUG_ON((*v & cmd_verb) != *v);
446 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
447 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
448 *v = cmd_verb | p->mr.valid_bit;
449 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
451 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
454 *v = cmd_verb | p->mc.valid_bit;
455 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
458 #ifdef QBMAN_CHECKING
459 p->mc.check = swp_mc_can_poll;
463 void *qbman_swp_mc_result(struct qbman_swp *p)
466 #ifdef QBMAN_CHECKING
467 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
469 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
470 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
471 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
472 /* Command completed if the valid bit is toggled */
473 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
475 /* Remove the valid-bit -
476 * command completed iff the rest is non-zero
478 verb = ret[0] & ~QB_VALID_BIT;
481 p->mr.valid_bit ^= QB_VALID_BIT;
483 qbman_cena_invalidate_prefetch(&p->sys,
484 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
485 ret = qbman_cena_read(&p->sys,
486 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
487 /* Remove the valid-bit -
488 * command completed iff the rest is non-zero
490 verb = ret[0] & ~QB_VALID_BIT;
493 p->mc.valid_bit ^= QB_VALID_BIT;
495 #ifdef QBMAN_CHECKING
496 p->mc.check = swp_mc_can_start;
505 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
/* Enqueue-command response options (EC field of the verb byte). */
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};
512 #define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
513 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
514 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
515 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
516 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
517 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
518 #define QB_ENQUEUE_CMD_NLIS_SHIFT 14
519 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
521 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
523 memset(d, 0, sizeof(*d));
526 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
528 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
530 d->eq.verb |= enqueue_response_always;
532 d->eq.verb |= enqueue_rejects_to_fq;
535 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
536 uint16_t opr_id, uint16_t seqnum, int incomplete)
538 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
540 d->eq.verb |= enqueue_response_always;
542 d->eq.verb |= enqueue_rejects_to_fq;
544 d->eq.orpid = opr_id;
545 d->eq.seqnum = seqnum;
547 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
549 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
552 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
555 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
556 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
557 d->eq.orpid = opr_id;
558 d->eq.seqnum = seqnum;
559 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
560 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
563 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
566 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
567 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
568 d->eq.orpid = opr_id;
569 d->eq.seqnum = seqnum;
570 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
571 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
574 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
575 dma_addr_t storage_phys,
578 d->eq.rsp_addr = storage_phys;
582 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
587 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
589 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
593 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
594 uint16_t qd_bin, uint8_t qd_prio)
596 d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
598 d->eq.qdbin = qd_bin;
599 d->eq.qpri = qd_prio;
602 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
605 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
607 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
610 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
611 uint8_t dqrr_idx, int park)
614 d->eq.dca = dqrr_idx;
616 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
618 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
619 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
621 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
625 #define EQAR_IDX(eqar) ((eqar) & 0x1f)
626 #define EQAR_VB(eqar) ((eqar) & 0x80)
627 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
629 static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
633 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
636 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
642 static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
643 const struct qbman_eq_desc *d,
644 const struct qbman_fd *fd)
647 const uint32_t *cl = qb_cl(d);
648 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
650 pr_debug("EQAR=%08x\n", eqar);
651 if (!EQAR_SUCCESS(eqar))
653 p = qbman_cena_write_start_wo_shadow(&s->sys,
654 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
655 memcpy(&p[1], &cl[1], 28);
656 memcpy(&p[8], fd, sizeof(*fd));
658 /* Set the verb byte, have to substitute in the valid-bit */
660 p[0] = cl[0] | EQAR_VB(eqar);
661 qbman_cena_write_complete_wo_shadow(&s->sys,
662 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
665 static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
666 const struct qbman_eq_desc *d,
667 const struct qbman_fd *fd)
670 const uint32_t *cl = qb_cl(d);
671 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
673 pr_debug("EQAR=%08x\n", eqar);
674 if (!EQAR_SUCCESS(eqar))
676 p = qbman_cena_write_start_wo_shadow(&s->sys,
677 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
678 memcpy(&p[1], &cl[1], 28);
679 memcpy(&p[8], fd, sizeof(*fd));
681 /* Set the verb byte, have to substitute in the valid-bit */
682 p[0] = cl[0] | EQAR_VB(eqar);
684 qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
/* Dispatch an array-mode enqueue through the access-method pointer. */
static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}
695 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
696 const struct qbman_eq_desc *d,
697 const struct qbman_fd *fd)
700 const uint32_t *cl = qb_cl(d);
701 uint32_t eqcr_ci, full_mask, half_mask;
703 half_mask = (s->eqcr.pi_ci_mask>>1);
704 full_mask = s->eqcr.pi_ci_mask;
705 if (!s->eqcr.available) {
706 eqcr_ci = s->eqcr.ci;
707 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
708 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
709 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
710 eqcr_ci, s->eqcr.ci);
711 if (!s->eqcr.available)
715 p = qbman_cena_write_start_wo_shadow(&s->sys,
716 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
717 memcpy(&p[1], &cl[1], 28);
718 memcpy(&p[8], fd, sizeof(*fd));
721 /* Set the verb byte, have to substitute in the valid-bit */
722 p[0] = cl[0] | s->eqcr.pi_vb;
723 qbman_cena_write_complete_wo_shadow(&s->sys,
724 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
726 s->eqcr.pi &= full_mask;
728 if (!(s->eqcr.pi & half_mask))
729 s->eqcr.pi_vb ^= QB_VALID_BIT;
734 static int qbman_swp_enqueue_ring_mode_cinh_direct(
736 const struct qbman_eq_desc *d,
737 const struct qbman_fd *fd)
740 const uint32_t *cl = qb_cl(d);
741 uint32_t eqcr_ci, full_mask, half_mask;
743 half_mask = (s->eqcr.pi_ci_mask>>1);
744 full_mask = s->eqcr.pi_ci_mask;
745 if (!s->eqcr.available) {
746 eqcr_ci = s->eqcr.ci;
747 s->eqcr.ci = qbman_cinh_read(&s->sys,
748 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
749 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
750 eqcr_ci, s->eqcr.ci);
751 if (!s->eqcr.available)
755 p = qbman_cena_write_start_wo_shadow(&s->sys,
756 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
757 memcpy(&p[1], &cl[1], 28);
758 memcpy(&p[8], fd, sizeof(*fd));
761 /* Set the verb byte, have to substitute in the valid-bit */
762 p[0] = cl[0] | s->eqcr.pi_vb;
763 qbman_cena_write_complete_wo_shadow(&s->sys,
764 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
766 s->eqcr.pi &= full_mask;
768 if (!(s->eqcr.pi & half_mask))
769 s->eqcr.pi_vb ^= QB_VALID_BIT;
774 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
775 const struct qbman_eq_desc *d,
776 const struct qbman_fd *fd)
779 const uint32_t *cl = qb_cl(d);
780 uint32_t eqcr_ci, full_mask, half_mask;
782 half_mask = (s->eqcr.pi_ci_mask>>1);
783 full_mask = s->eqcr.pi_ci_mask;
784 if (!s->eqcr.available) {
785 eqcr_ci = s->eqcr.ci;
786 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
787 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
788 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
789 eqcr_ci, s->eqcr.ci);
790 if (!s->eqcr.available)
794 p = qbman_cena_write_start_wo_shadow(&s->sys,
795 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
796 memcpy(&p[1], &cl[1], 28);
797 memcpy(&p[8], fd, sizeof(*fd));
799 /* Set the verb byte, have to substitute in the valid-bit */
800 p[0] = cl[0] | s->eqcr.pi_vb;
802 s->eqcr.pi &= full_mask;
804 if (!(s->eqcr.pi & half_mask))
805 s->eqcr.pi_vb ^= QB_VALID_BIT;
807 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
808 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Dispatch a ring-mode enqueue through the access-method pointer. */
static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
}
819 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
820 const struct qbman_fd *fd)
822 if (s->sys.eqcr_mode == qman_eqcr_vb_array)
823 return qbman_swp_enqueue_array_mode(s, d, fd);
824 else /* Use ring mode by default */
825 return qbman_swp_enqueue_ring_mode(s, d, fd);
828 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
829 const struct qbman_eq_desc *d,
830 const struct qbman_fd *fd,
835 const uint32_t *cl = qb_cl(d);
836 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
837 int i, num_enqueued = 0;
840 half_mask = (s->eqcr.pi_ci_mask>>1);
841 full_mask = s->eqcr.pi_ci_mask;
842 if (!s->eqcr.available) {
843 eqcr_ci = s->eqcr.ci;
844 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
845 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
846 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
847 eqcr_ci, s->eqcr.ci);
848 if (!s->eqcr.available)
852 eqcr_pi = s->eqcr.pi;
853 num_enqueued = (s->eqcr.available < num_frames) ?
854 s->eqcr.available : num_frames;
855 s->eqcr.available -= num_enqueued;
856 /* Fill in the EQCR ring */
857 for (i = 0; i < num_enqueued; i++) {
858 p = qbman_cena_write_start_wo_shadow(&s->sys,
859 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
860 memcpy(&p[1], &cl[1], 28);
861 memcpy(&p[8], &fd[i], sizeof(*fd));
867 /* Set the verb byte, have to substitute in the valid-bit */
868 eqcr_pi = s->eqcr.pi;
869 for (i = 0; i < num_enqueued; i++) {
870 p = qbman_cena_write_start_wo_shadow(&s->sys,
871 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
872 p[0] = cl[0] | s->eqcr.pi_vb;
873 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
874 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
876 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
877 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
880 if (!(eqcr_pi & half_mask))
881 s->eqcr.pi_vb ^= QB_VALID_BIT;
884 /* Flush all the cacheline without load/store in between */
885 eqcr_pi = s->eqcr.pi;
886 addr_cena = (size_t)s->sys.addr_cena;
887 for (i = 0; i < num_enqueued; i++) {
888 dcbf((uintptr_t)(addr_cena +
889 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
892 s->eqcr.pi = eqcr_pi & full_mask;
897 static int qbman_swp_enqueue_multiple_cinh_direct(
899 const struct qbman_eq_desc *d,
900 const struct qbman_fd *fd,
905 const uint32_t *cl = qb_cl(d);
906 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
907 int i, num_enqueued = 0;
910 half_mask = (s->eqcr.pi_ci_mask>>1);
911 full_mask = s->eqcr.pi_ci_mask;
912 if (!s->eqcr.available) {
913 eqcr_ci = s->eqcr.ci;
914 s->eqcr.ci = qbman_cinh_read(&s->sys,
915 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
916 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
917 eqcr_ci, s->eqcr.ci);
918 if (!s->eqcr.available)
922 eqcr_pi = s->eqcr.pi;
923 num_enqueued = (s->eqcr.available < num_frames) ?
924 s->eqcr.available : num_frames;
925 s->eqcr.available -= num_enqueued;
926 /* Fill in the EQCR ring */
927 for (i = 0; i < num_enqueued; i++) {
928 p = qbman_cena_write_start_wo_shadow(&s->sys,
929 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
930 memcpy(&p[1], &cl[1], 28);
931 memcpy(&p[8], &fd[i], sizeof(*fd));
937 /* Set the verb byte, have to substitute in the valid-bit */
938 eqcr_pi = s->eqcr.pi;
939 for (i = 0; i < num_enqueued; i++) {
940 p = qbman_cena_write_start_wo_shadow(&s->sys,
941 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
942 p[0] = cl[0] | s->eqcr.pi_vb;
943 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
944 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
946 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
947 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
950 if (!(eqcr_pi & half_mask))
951 s->eqcr.pi_vb ^= QB_VALID_BIT;
954 /* Flush all the cacheline without load/store in between */
955 eqcr_pi = s->eqcr.pi;
956 addr_cena = (size_t)s->sys.addr_cena;
957 for (i = 0; i < num_enqueued; i++) {
959 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
962 s->eqcr.pi = eqcr_pi & full_mask;
967 static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
968 const struct qbman_eq_desc *d,
969 const struct qbman_fd *fd,
974 const uint32_t *cl = qb_cl(d);
975 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
976 int i, num_enqueued = 0;
978 half_mask = (s->eqcr.pi_ci_mask>>1);
979 full_mask = s->eqcr.pi_ci_mask;
980 if (!s->eqcr.available) {
981 eqcr_ci = s->eqcr.ci;
982 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
983 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
984 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
985 eqcr_ci, s->eqcr.ci);
986 if (!s->eqcr.available)
990 eqcr_pi = s->eqcr.pi;
991 num_enqueued = (s->eqcr.available < num_frames) ?
992 s->eqcr.available : num_frames;
993 s->eqcr.available -= num_enqueued;
994 /* Fill in the EQCR ring */
995 for (i = 0; i < num_enqueued; i++) {
996 p = qbman_cena_write_start_wo_shadow(&s->sys,
997 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
998 memcpy(&p[1], &cl[1], 28);
999 memcpy(&p[8], &fd[i], sizeof(*fd));
1000 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1001 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1003 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1004 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1007 p[0] = cl[0] | s->eqcr.pi_vb;
1009 if (!(eqcr_pi & half_mask))
1010 s->eqcr.pi_vb ^= QB_VALID_BIT;
1012 s->eqcr.pi = eqcr_pi & full_mask;
1015 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1016 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1017 return num_enqueued;
1020 inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1021 const struct qbman_eq_desc *d,
1022 const struct qbman_fd *fd,
1026 return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
1029 static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
1030 const struct qbman_eq_desc *d,
1031 struct qbman_fd **fd,
1036 const uint32_t *cl = qb_cl(d);
1037 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1038 int i, num_enqueued = 0;
1041 half_mask = (s->eqcr.pi_ci_mask>>1);
1042 full_mask = s->eqcr.pi_ci_mask;
1043 if (!s->eqcr.available) {
1044 eqcr_ci = s->eqcr.ci;
1045 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1046 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1047 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1048 eqcr_ci, s->eqcr.ci);
1049 if (!s->eqcr.available)
1053 eqcr_pi = s->eqcr.pi;
1054 num_enqueued = (s->eqcr.available < num_frames) ?
1055 s->eqcr.available : num_frames;
1056 s->eqcr.available -= num_enqueued;
1057 /* Fill in the EQCR ring */
1058 for (i = 0; i < num_enqueued; i++) {
1059 p = qbman_cena_write_start_wo_shadow(&s->sys,
1060 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1061 memcpy(&p[1], &cl[1], 28);
1062 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1068 /* Set the verb byte, have to substitute in the valid-bit */
1069 eqcr_pi = s->eqcr.pi;
1070 for (i = 0; i < num_enqueued; i++) {
1071 p = qbman_cena_write_start_wo_shadow(&s->sys,
1072 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1073 p[0] = cl[0] | s->eqcr.pi_vb;
1074 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1075 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1077 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1078 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1081 if (!(eqcr_pi & half_mask))
1082 s->eqcr.pi_vb ^= QB_VALID_BIT;
1085 /* Flush all the cacheline without load/store in between */
1086 eqcr_pi = s->eqcr.pi;
1087 addr_cena = (size_t)s->sys.addr_cena;
1088 for (i = 0; i < num_enqueued; i++) {
1090 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1093 s->eqcr.pi = eqcr_pi & full_mask;
1095 return num_enqueued;
1098 static int qbman_swp_enqueue_multiple_fd_cinh_direct(
1099 struct qbman_swp *s,
1100 const struct qbman_eq_desc *d,
1101 struct qbman_fd **fd,
1106 const uint32_t *cl = qb_cl(d);
1107 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1108 int i, num_enqueued = 0;
1111 half_mask = (s->eqcr.pi_ci_mask>>1);
1112 full_mask = s->eqcr.pi_ci_mask;
1113 if (!s->eqcr.available) {
1114 eqcr_ci = s->eqcr.ci;
1115 s->eqcr.ci = qbman_cinh_read(&s->sys,
1116 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1117 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1118 eqcr_ci, s->eqcr.ci);
1119 if (!s->eqcr.available)
1123 eqcr_pi = s->eqcr.pi;
1124 num_enqueued = (s->eqcr.available < num_frames) ?
1125 s->eqcr.available : num_frames;
1126 s->eqcr.available -= num_enqueued;
1127 /* Fill in the EQCR ring */
1128 for (i = 0; i < num_enqueued; i++) {
1129 p = qbman_cena_write_start_wo_shadow(&s->sys,
1130 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1131 memcpy(&p[1], &cl[1], 28);
1132 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1138 /* Set the verb byte, have to substitute in the valid-bit */
1139 eqcr_pi = s->eqcr.pi;
1140 for (i = 0; i < num_enqueued; i++) {
1141 p = qbman_cena_write_start_wo_shadow(&s->sys,
1142 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1143 p[0] = cl[0] | s->eqcr.pi_vb;
1144 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1145 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1147 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1148 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1151 if (!(eqcr_pi & half_mask))
1152 s->eqcr.pi_vb ^= QB_VALID_BIT;
1155 /* Flush all the cacheline without load/store in between */
1156 eqcr_pi = s->eqcr.pi;
1157 addr_cena = (size_t)s->sys.addr_cena;
1158 for (i = 0; i < num_enqueued; i++) {
1160 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1163 s->eqcr.pi = eqcr_pi & full_mask;
1165 return num_enqueued;
/*
 * Enqueue up to 'num_frames' frames, each referenced by its own FD pointer
 * in fd[], against a single enqueue descriptor 'd', on a memory-backed
 * portal. Returns the number of frames actually placed in the EQCR ring,
 * bounded by the free ring space (may be 0 when the ring is full).
 */
1168 static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
1169 const struct qbman_eq_desc *d,
1170 struct qbman_fd **fd,
1175 const uint32_t *cl = qb_cl(d);
1176 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1177 int i, num_enqueued = 0;
1179 half_mask = (s->eqcr.pi_ci_mask>>1);
1180 full_mask = s->eqcr.pi_ci_mask;
/* Out of cached credits: refresh the consumer index from hardware */
1181 if (!s->eqcr.available) {
1182 eqcr_ci = s->eqcr.ci;
1183 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1184 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1185 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1186 eqcr_ci, s->eqcr.ci);
1187 if (!s->eqcr.available)
/* Enqueue no more frames than there are free EQCR entries */
1191 eqcr_pi = s->eqcr.pi;
1192 num_enqueued = (s->eqcr.available < num_frames) ?
1193 s->eqcr.available : num_frames;
1194 s->eqcr.available -= num_enqueued;
1195 /* Fill in the EQCR ring */
1196 for (i = 0; i < num_enqueued; i++) {
1197 p = qbman_cena_write_start_wo_shadow(&s->sys,
1198 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1199 memcpy(&p[1], &cl[1], 28);
1200 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1204 /* Set the verb byte, have to substitute in the valid-bit */
1205 eqcr_pi = s->eqcr.pi;
1206 for (i = 0; i < num_enqueued; i++) {
1207 p = qbman_cena_write_start_wo_shadow(&s->sys,
1208 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1209 p[0] = cl[0] | s->eqcr.pi_vb;
/* Optionally request discrete consumption acknowledgment (DCA) */
1210 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1211 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1213 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1214 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
/* Toggle the valid bit each time the producer index wraps the half-ring */
1217 if (!(eqcr_pi & half_mask))
1218 s->eqcr.pi_vb ^= QB_VALID_BIT;
1220 s->eqcr.pi = eqcr_pi & full_mask;
/* Ring the EQCR producer-index doorbell (ring-triggered mode) */
1223 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1224 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1225 return num_enqueued;
/*
 * Public entry point for multi-FD enqueue: dispatches through the
 * function pointer selected at portal initialisation (direct vs
 * memory-backed implementation).
 */
1228 inline int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1229 const struct qbman_eq_desc *d,
1230 struct qbman_fd **fd,
1234 return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags, num_frames);
/*
 * Enqueue up to 'num_frames' frames where each frame has its own enqueue
 * descriptor ('d' is an array), using the direct (cache-enabled) portal
 * mode. Returns the number of frames actually enqueued (0 if the EQCR
 * ring is full).
 */
1237 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
1238 const struct qbman_eq_desc *d,
1239 const struct qbman_fd *fd,
1244 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1245 int i, num_enqueued = 0;
1248 half_mask = (s->eqcr.pi_ci_mask>>1);
1249 full_mask = s->eqcr.pi_ci_mask;
/* Out of cached credits: refresh the consumer index from hardware */
1250 if (!s->eqcr.available) {
1251 eqcr_ci = s->eqcr.ci;
1252 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1253 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1254 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1255 eqcr_ci, s->eqcr.ci);
1256 if (!s->eqcr.available)
1260 eqcr_pi = s->eqcr.pi;
1261 num_enqueued = (s->eqcr.available < num_frames) ?
1262 s->eqcr.available : num_frames;
1263 s->eqcr.available -= num_enqueued;
1264 /* Fill in the EQCR ring */
1265 for (i = 0; i < num_enqueued; i++) {
1266 p = qbman_cena_write_start_wo_shadow(&s->sys,
1267 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1269 memcpy(&p[1], &cl[1], 28);
1270 memcpy(&p[8], &fd[i], sizeof(*fd));
1276 /* Set the verb byte, have to substitute in the valid-bit */
1277 eqcr_pi = s->eqcr.pi;
1278 for (i = 0; i < num_enqueued; i++) {
1279 p = qbman_cena_write_start_wo_shadow(&s->sys,
1280 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1282 p[0] = cl[0] | s->eqcr.pi_vb;
/* Toggle the valid bit each time the producer index wraps the half-ring */
1284 if (!(eqcr_pi & half_mask))
1285 s->eqcr.pi_vb ^= QB_VALID_BIT;
1288 /* Flush all the cacheline without load/store in between */
1289 eqcr_pi = s->eqcr.pi;
1290 addr_cena = (size_t)s->sys.addr_cena;
1291 for (i = 0; i < num_enqueued; i++) {
1292 dcbf((uintptr_t)(addr_cena +
1293 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1296 s->eqcr.pi = eqcr_pi & full_mask;
1298 return num_enqueued;
/*
 * Per-frame-descriptor multi-enqueue for portals whose EQCR consumer
 * index must be read through the cache-inhibited (CINH) register region
 * rather than the cache-enabled shadow. Otherwise mirrors the direct
 * variant above; returns the number of frames enqueued.
 */
1301 static int qbman_swp_enqueue_multiple_desc_cinh_direct(
1302 struct qbman_swp *s,
1303 const struct qbman_eq_desc *d,
1304 const struct qbman_fd *fd,
1309 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1310 int i, num_enqueued = 0;
1313 half_mask = (s->eqcr.pi_ci_mask>>1);
1314 full_mask = s->eqcr.pi_ci_mask;
/* Out of cached credits: refresh CI via the cache-inhibited register */
1315 if (!s->eqcr.available) {
1316 eqcr_ci = s->eqcr.ci;
1317 s->eqcr.ci = qbman_cinh_read(&s->sys,
1318 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1319 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1320 eqcr_ci, s->eqcr.ci);
1321 if (!s->eqcr.available)
1325 eqcr_pi = s->eqcr.pi;
1326 num_enqueued = (s->eqcr.available < num_frames) ?
1327 s->eqcr.available : num_frames;
1328 s->eqcr.available -= num_enqueued;
1329 /* Fill in the EQCR ring */
1330 for (i = 0; i < num_enqueued; i++) {
1331 p = qbman_cena_write_start_wo_shadow(&s->sys,
1332 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1334 memcpy(&p[1], &cl[1], 28);
1335 memcpy(&p[8], &fd[i], sizeof(*fd));
1341 /* Set the verb byte, have to substitute in the valid-bit */
1342 eqcr_pi = s->eqcr.pi;
1343 for (i = 0; i < num_enqueued; i++) {
1344 p = qbman_cena_write_start_wo_shadow(&s->sys,
1345 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1347 p[0] = cl[0] | s->eqcr.pi_vb;
/* Toggle the valid bit each time the producer index wraps the half-ring */
1349 if (!(eqcr_pi & half_mask))
1350 s->eqcr.pi_vb ^= QB_VALID_BIT;
1353 /* Flush all the cacheline without load/store in between */
1354 eqcr_pi = s->eqcr.pi;
1355 addr_cena = (size_t)s->sys.addr_cena;
1356 for (i = 0; i < num_enqueued; i++) {
1358 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1361 s->eqcr.pi = eqcr_pi & full_mask;
1363 return num_enqueued;
/*
 * Per-frame-descriptor multi-enqueue for memory-backed portals. Instead
 * of flushing cachelines, it rings the EQCR producer-index doorbell once
 * at the end. Returns the number of frames enqueued.
 */
1366 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
1367 const struct qbman_eq_desc *d,
1368 const struct qbman_fd *fd,
1373 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1374 int i, num_enqueued = 0;
1376 half_mask = (s->eqcr.pi_ci_mask>>1);
1377 full_mask = s->eqcr.pi_ci_mask;
/* Out of cached credits: refresh CI from the memory-backed register */
1378 if (!s->eqcr.available) {
1379 eqcr_ci = s->eqcr.ci;
1380 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1381 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1382 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1383 eqcr_ci, s->eqcr.ci);
1384 if (!s->eqcr.available)
1388 eqcr_pi = s->eqcr.pi;
1389 num_enqueued = (s->eqcr.available < num_frames) ?
1390 s->eqcr.available : num_frames;
1391 s->eqcr.available -= num_enqueued;
1392 /* Fill in the EQCR ring */
1393 for (i = 0; i < num_enqueued; i++) {
1394 p = qbman_cena_write_start_wo_shadow(&s->sys,
1395 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1397 memcpy(&p[1], &cl[1], 28);
1398 memcpy(&p[8], &fd[i], sizeof(*fd));
1402 /* Set the verb byte, have to substitute in the valid-bit */
1403 eqcr_pi = s->eqcr.pi;
1404 for (i = 0; i < num_enqueued; i++) {
1405 p = qbman_cena_write_start_wo_shadow(&s->sys,
1406 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1408 p[0] = cl[0] | s->eqcr.pi_vb;
/* Toggle the valid bit each time the producer index wraps the half-ring */
1410 if (!(eqcr_pi & half_mask))
1411 s->eqcr.pi_vb ^= QB_VALID_BIT;
1414 s->eqcr.pi = eqcr_pi & full_mask;
/* Ring the EQCR producer-index doorbell (ring-triggered mode) */
1417 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1418 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1420 return num_enqueued;
/*
 * Public entry point for per-descriptor multi-enqueue: dispatches through
 * the implementation pointer chosen at portal initialisation.
 */
1422 inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1423 const struct qbman_eq_desc *d,
1424 const struct qbman_fd *fd,
1427 return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
1430 /*************************/
1431 /* Static (push) dequeue */
1432 /*************************/
/*
 * Query whether static (push) dequeuing is enabled for a channel by
 * inspecting the software copy of the SDQCR source map.
 * NOTE(review): OR-ing in (1 << channel_idx) makes *enabled non-zero
 * unconditionally; a test of (src & (1 << channel_idx)) looks intended —
 * confirm against the upstream QBMan driver before changing.
 */
1434 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1436 uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1438 QBMAN_BUG_ON(channel_idx > 15);
1439 *enabled = src | (1 << channel_idx);
/*
 * Enable or disable static (push) dequeuing for one channel, then write
 * the updated SDQCR to the portal.
 */
1442 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1446 QBMAN_BUG_ON(channel_idx > 15);
1448 s->sdq |= 1 << channel_idx;
1450 s->sdq &= ~(1 << channel_idx);
1452 /* Read back the complete src map. If no channels are enabled,
1453 * the SDQCR must be written as 0 or else QMan will assert errors
1455 dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1457 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1459 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1462 /***************************/
1463 /* Volatile (pull) dequeue */
1464 /***************************/
1466 /* These should be const, eventually */
1467 #define QB_VDQCR_VERB_DCT_SHIFT 0
1468 #define QB_VDQCR_VERB_DT_SHIFT 2
1469 #define QB_VDQCR_VERB_RLS_SHIFT 4
1470 #define QB_VDQCR_VERB_WAE_SHIFT 5
1471 #define QB_VDQCR_VERB_RAD_SHIFT 6
1475 qb_pull_dt_workqueue,
1476 qb_pull_dt_framequeue
/* Reset a volatile-dequeue (pull) descriptor to all-zero defaults. */
1479 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1481 memset(d, 0, sizeof(*d));
/*
 * Attach the dequeue-response storage to a pull descriptor: records the
 * virtual address for the driver and the physical (DMA) address for
 * hardware, and sets the RLS (response-to-storage) and WAE verb bits
 * accordingly.
 */
1484 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1485 struct qbman_result *storage,
1486 dma_addr_t storage_phys,
1489 d->pull.rsp_addr_virt = (size_t)storage;
/* No storage: responses go to DQRR, clear the RLS bit */
1492 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
/* Storage supplied: responses are written to memory (RLS set) */
1495 d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1497 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1499 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1501 d->pull.rsp_addr = storage_phys;
/* Set the number of frames to pull; hardware encodes the count minus one. */
1504 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1507 d->pull.numf = numframes - 1;
/* Set the caller-chosen token used to match responses to this command. */
1510 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1512 d->pull.tok = token;
/* Target the pull at a single frame queue (DCT=1, DT=framequeue). */
1515 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1517 d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1518 d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1519 d->pull.dq_src = fqid;
/* Target the pull at a work queue, with the given dequeue-command type. */
1522 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1523 enum qbman_pull_type_e dct)
1525 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1526 d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1527 d->pull.dq_src = wqid;
/* Target the pull at a channel, with the given dequeue-command type. */
1530 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1531 enum qbman_pull_type_e dct)
1533 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1534 d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1535 d->pull.dq_src = chid;
/*
 * Set/clear the RAD (reschedule-after-dequeue) verb bit. Only legal when
 * RLS is already set (responses go to storage); otherwise a diagnostic is
 * printed and the descriptor is left unchanged.
 */
1538 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1540 if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1542 d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1544 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1546 printf("The RAD feature is not valid when RLS = 0\n");
/*
 * Issue a volatile dequeue (pull) command, direct-portal variant. Only
 * one VDQCR may be outstanding per portal: the atomic 'busy' flag guards
 * this, and the call fails (without blocking) if a pull is in flight.
 */
1550 static int qbman_swp_pull_direct(struct qbman_swp *s,
1551 struct qbman_pull_desc *d)
1554 uint32_t *cl = qb_cl(d);
/* Try to claim the single VDQCR slot; undo and bail if already busy */
1556 if (!atomic_dec_and_test(&s->vdq.busy)) {
1557 atomic_inc(&s->vdq.busy);
/* Token encodes the issuing portal index (+1 so 0 means "no response") */
1561 d->pull.tok = s->sys.idx + 1;
1562 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1563 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1564 memcpy(&p[1], &cl[1], 12);
1566 /* Set the verb byte, have to substitute in the valid-bit */
1568 p[0] = cl[0] | s->vdq.valid_bit;
1569 s->vdq.valid_bit ^= QB_VALID_BIT;
1570 qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
/*
 * Issue a volatile dequeue (pull) command, memory-backed-portal variant.
 * Same single-outstanding-command rule as the direct variant, but the
 * command is committed via the VDQCR ring-trigger register.
 */
1575 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1576 struct qbman_pull_desc *d)
1579 uint32_t *cl = qb_cl(d);
/* Try to claim the single VDQCR slot; undo and bail if already busy */
1581 if (!atomic_dec_and_test(&s->vdq.busy)) {
1582 atomic_inc(&s->vdq.busy);
/* Token encodes the issuing portal index (+1 so 0 means "no response") */
1586 d->pull.tok = s->sys.idx + 1;
1587 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1588 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1589 memcpy(&p[1], &cl[1], 12);
1591 /* Set the verb byte, have to substitute in the valid-bit */
1592 p[0] = cl[0] | s->vdq.valid_bit;
1593 s->vdq.valid_bit ^= QB_VALID_BIT;
1595 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
/* Public pull entry point: dispatches to the portal-mode implementation. */
1600 inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1602 return qbman_swp_pull_ptr(s, d);
1609 #define QMAN_DQRR_PI_MASK 0xf
1611 #define QBMAN_RESULT_DQ 0x60
1612 #define QBMAN_RESULT_FQRN 0x21
1613 #define QBMAN_RESULT_FQRNI 0x22
1614 #define QBMAN_RESULT_FQPN 0x24
1615 #define QBMAN_RESULT_FQDAN 0x25
1616 #define QBMAN_RESULT_CDAN 0x26
1617 #define QBMAN_RESULT_CSCN_MEM 0x27
1618 #define QBMAN_RESULT_CGCU 0x28
1619 #define QBMAN_RESULT_BPSCN 0x29
1620 #define QBMAN_RESULT_CSCN_WQ 0x2a
1622 #include <rte_prefetch.h>
/* Prefetch the next DQRR entry so a subsequent dqrr_next() hits cache. */
1624 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1626 const struct qbman_result *p;
1628 p = qbman_cena_read_wo_shadow(&s->sys,
1629 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1633 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
1634 * only once, so repeated calls can return a sequence of DQRR entries, without
1635 * requiring they be consumed immediately or in any particular order.
/* Public DQRR-next entry point: dispatches to the portal-mode variant. */
1637 inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
1639 return qbman_swp_dqrr_next_ptr(s);
/*
 * Return the next unconsumed DQRR entry (direct-portal variant), or NULL
 * if nothing new is available. Handles the first-pass "DQRR reset bug"
 * workaround by polling the cache-inhibited producer index until the ring
 * has been written once end-to-end.
 */
1642 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1645 uint32_t response_verb;
1647 const struct qbman_result *p;
1649 /* Before using valid-bit to detect if something is there, we have to
1650 * handle the case of the DQRR reset bug...
1652 if (s->dqrr.reset_bug) {
1653 /* We pick up new entries by cache-inhibited producer index,
1654 * which means that a non-coherent mapping would require us to
1655 * invalidate and read *only* once that PI has indicated that
1656 * there's an entry here. The first trip around the DQRR ring
1657 * will be much less efficient than all subsequent trips around
1660 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
1663 /* there are new entries if pi != next_idx */
1664 if (pi == s->dqrr.next_idx)
1667 /* if next_idx is/was the last ring index, and 'pi' is
1668 * different, we can disable the workaround as all the ring
1669 * entries have now been DMA'd to so valid-bit checking is
1670 * repaired. Note: this logic needs to be based on next_idx
1671 * (which increments one at a time), rather than on pi (which
1672 * can burst and wrap-around between our snapshots of it).
1674 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
1675 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
1676 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
1677 s->dqrr.next_idx, pi);
1678 s->dqrr.reset_bug = 0;
1680 qbman_cena_invalidate_prefetch(&s->sys,
1681 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1683 p = qbman_cena_read_wo_shadow(&s->sys,
1684 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1688 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1689 * in the DQRR reset bug workaround, we shouldn't need to skip this
1690 * check, because we've already determined that a new entry is available
1691 * and we've invalidated the cacheline before reading it, so the
1692 * valid-bit behaviour is repaired and should tell us what we already
1693 * knew from reading PI.
1695 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1698 /* There's something there. Move "next_idx" attention to the next ring
1699 * entry (and prefetch it) before returning what we found.
/* Wrap next_idx and flip the expected valid-bit polarity at ring end */
1702 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1703 s->dqrr.next_idx = 0;
1704 s->dqrr.valid_bit ^= QB_VALID_BIT;
1706 /* If this is the final response to a volatile dequeue command
1707 * indicate that the vdq is no longer busy
1710 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1711 if ((response_verb == QBMAN_RESULT_DQ) &&
1712 (flags & QBMAN_DQ_STAT_VOLATILE) &&
1713 (flags & QBMAN_DQ_STAT_EXPIRED))
1714 atomic_inc(&s->vdq.busy);
/*
 * Return the next unconsumed DQRR entry (memory-backed-portal variant),
 * or NULL if nothing new is available. No reset-bug workaround is needed
 * in this mode.
 */
1719 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1722 uint32_t response_verb;
1724 const struct qbman_result *p;
1726 p = qbman_cena_read_wo_shadow(&s->sys,
1727 QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1731 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1732 * in the DQRR reset bug workaround, we shouldn't need to skip this
1733 * check, because we've already determined that a new entry is available
1734 * and we've invalidated the cacheline before reading it, so the
1735 * valid-bit behaviour is repaired and should tell us what we already
1736 * knew from reading PI.
1738 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1741 /* There's something there. Move "next_idx" attention to the next ring
1742 * entry (and prefetch it) before returning what we found.
/* Wrap next_idx and flip the expected valid-bit polarity at ring end */
1745 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1746 s->dqrr.next_idx = 0;
1747 s->dqrr.valid_bit ^= QB_VALID_BIT;
1749 /* If this is the final response to a volatile dequeue command
1750 * indicate that the vdq is no longer busy
1753 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1754 if ((response_verb == QBMAN_RESULT_DQ)
1755 && (flags & QBMAN_DQ_STAT_VOLATILE)
1756 && (flags & QBMAN_DQ_STAT_EXPIRED))
1757 atomic_inc(&s->vdq.busy);
1761 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1762 void qbman_swp_dqrr_consume(struct qbman_swp *s,
1763 const struct qbman_result *dq)
/* Recover the ring index from the entry's address and ack it via DCAP */
1765 qbman_cinh_write(&s->sys,
1766 QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1769 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
/* Variant taking the DQRR ring index directly instead of an entry pointer. */
1770 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
1773 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
1776 /*********************************/
1777 /* Polling user-provided storage */
1778 /*********************************/
/*
 * Poll user-provided storage for a newly-written dequeue result. A zero
 * token means hardware has not written this entry yet. When a result is
 * found, the token is cleared for the next polling pass and, if this
 * storage belongs to the portal's outstanding VDQCR, the busy flag is
 * released.
 */
1780 int qbman_result_has_new_result(struct qbman_swp *s,
1781 struct qbman_result *dq)
1783 if (dq->dq.tok == 0)
1787 * Set token to be 0 so we will detect change back to 1
1788 * next time the looping is traversed. Const is cast away here
1789 * as we want users to treat the dequeue responses as read only.
1791 ((struct qbman_result *)dq)->dq.tok = 0;
1794 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1795 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1796 * that makes it available. Eg. we may be looking at our 10th dequeue
1797 * result, having released VDQCR after the 1st result and it is now
1798 * busy due to some other command!
1800 if (s->vdq.storage == dq) {
1801 s->vdq.storage = NULL;
1802 atomic_inc(&s->vdq.busy);
/*
 * Portal-less variant of result polling: checks the token only, without
 * touching any VDQCR busy state. Returns false while the token is 0.
 */
1808 int qbman_check_new_result(struct qbman_result *dq)
1810 if (dq->dq.tok == 0)
1814 * Set token to be 0 so we will detect change back to 1
1815 * next time the looping is traversed. Const is cast away here
1816 * as we want users to treat the dequeue responses as read only.
1818 ((struct qbman_result *)dq)->dq.tok = 0;
/*
 * Check whether a command response has arrived in storage, looking up the
 * issuing portal from the token (token was set to portal index + 1 at
 * pull time) so the correct VDQCR busy flag can be released.
 * NOTE(review): portal_idx_map[tok - 1] is dereferenced without a NULL
 * check; assumes the issuing portal is still registered — confirm.
 */
1823 int qbman_check_command_complete(struct qbman_result *dq)
1825 struct qbman_swp *s;
1827 if (dq->dq.tok == 0)
1830 s = portal_idx_map[dq->dq.tok - 1];
1832 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1833 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1834 * that makes it available. Eg. we may be looking at our 10th dequeue
1835 * result, having released VDQCR after the 1st result and it is now
1836 * busy due to some other command!
1838 if (s->vdq.storage == dq) {
1839 s->vdq.storage = NULL;
1840 atomic_inc(&s->vdq.busy);
1846 /********************************/
1847 /* Categorising qbman results */
1848 /********************************/
/* True if the result's verb (minus the valid bit) matches type code 'x'. */
1850 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1853 uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1855 return (response_verb == x);
/* Type predicates for qbman results, one per QBMAN_RESULT_* verb code. */
1858 int qbman_result_is_DQ(const struct qbman_result *dq)
1860 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1863 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1865 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1868 int qbman_result_is_CDAN(const struct qbman_result *dq)
1870 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
/* CSCN may arrive either as a memory-written or WQ-delivered notification */
1873 int qbman_result_is_CSCN(const struct qbman_result *dq)
1875 return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1876 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1879 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1881 return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
1884 int qbman_result_is_CGCU(const struct qbman_result *dq)
1886 return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
1889 int qbman_result_is_FQRN(const struct qbman_result *dq)
1891 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1894 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1896 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1899 int qbman_result_is_FQPN(const struct qbman_result *dq)
1901 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1904 /*********************************/
1905 /* Parsing frame dequeue results */
1906 /*********************************/
1908 /* These APIs assume qbman_result_is_DQ() is TRUE */
/* Field accessors for frame-dequeue results; valid only when
 * qbman_result_is_DQ() is true for the entry.
 */
1910 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1915 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1917 return dq->dq.seqnum;
1920 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1922 return dq->dq.oprid;
1925 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1930 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1932 return dq->dq.fq_byte_cnt;
1935 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1937 return dq->dq.fq_frm_cnt;
1940 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1942 return dq->dq.fqd_ctx;
1945 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1947 return (const struct qbman_fd *)&dq->dq.fd[0];
1950 /**************************************/
1951 /* Parsing state-change notifications */
1952 /**************************************/
/* Generic state-change-notification (SCN) field accessors. */
1953 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1955 return scn->scn.state;
1958 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1960 return scn->scn.rid_tok;
1963 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1965 return scn->scn.ctx;
/* Buffer-pool SCN helpers: the pool id lives in the low 14 bits of the
 * resource id; state bit 0 = depletion-exit, bit 1 = depleted,
 * bit 2 = surplus (per the decoding below).
 */
1971 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1973 return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
1976 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1978 return !(int)(qbman_result_SCN_state(scn) & 0x1);
1981 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1983 return (int)(qbman_result_SCN_state(scn) & 0x2);
1986 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1988 return (int)(qbman_result_SCN_state(scn) & 0x4);
1991 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1993 return qbman_result_SCN_ctx(scn);
/* Congestion-group-count-update (CGCU) helpers. */
1999 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
2001 return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
2004 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
2006 return qbman_result_SCN_ctx(scn);
2009 /********************/
2010 /* Parsing EQ RESP */
2011 /********************/
/* Enqueue-response (EQ RESP) accessors. */
2012 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2014 return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2017 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2019 eqresp->eq_resp.rspid = val;
2022 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2024 return eqresp->eq_resp.rspid;
/* Return the enqueue result code; 0xE is the hardware success value. */
2027 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2029 if (eqresp->eq_resp.rc == 0xE)
2035 /******************/
2036 /* Buffer release */
2037 /******************/
2038 #define QB_BR_RC_VALID_SHIFT 5
2039 #define QB_BR_RCDI_SHIFT 6
/* Reset a buffer-release descriptor; RC_VALID must always be set. */
2041 void qbman_release_desc_clear(struct qbman_release_desc *d)
2043 memset(d, 0, sizeof(*d));
2044 d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
/* Select the buffer pool the released buffers are returned to. */
2047 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
/* Enable/disable RCDI (release-command dequeue interception) in the verb. */
2052 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2055 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2057 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
2060 #define RAR_IDX(rar) ((rar) & 0x7)
2061 #define RAR_VB(rar) ((rar) & 0x80)
2062 #define RAR_SUCCESS(rar) ((rar) & 0x100)
/*
 * Release 1..7 buffers back to a pool, direct-portal variant. Reads the
 * Release Array Allocation Register (RAR) to claim an RCR slot; fails if
 * no slot is available.
 */
2064 static int qbman_swp_release_direct(struct qbman_swp *s,
2065 const struct qbman_release_desc *d,
2066 const uint64_t *buffers,
2067 unsigned int num_buffers)
2070 const uint32_t *cl = qb_cl(d);
2071 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2073 pr_debug("RAR=%08x\n", rar);
2074 if (!RAR_SUCCESS(rar))
/* Hardware supports at most 7 buffers per release command */
2077 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2079 /* Start the release command */
2080 p = qbman_cena_write_start_wo_shadow(&s->sys,
2081 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2083 /* Copy the caller's buffer pointers to the command */
2084 u64_to_le32_copy(&p[2], buffers, num_buffers);
2086 /* Set the verb byte, have to substitute in the valid-bit and the
2087 * number of buffers.
2090 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2091 qbman_cena_write_complete_wo_shadow(&s->sys,
2092 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
/*
 * Release 1..7 buffers back to a pool, memory-backed-portal variant.
 * Same RAR slot claiming as the direct variant; the command is committed
 * via the per-slot RCR ring-trigger register instead of a cache flush.
 */
2097 static int qbman_swp_release_mem_back(struct qbman_swp *s,
2098 const struct qbman_release_desc *d,
2099 const uint64_t *buffers,
2100 unsigned int num_buffers)
2103 const uint32_t *cl = qb_cl(d);
2104 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2106 pr_debug("RAR=%08x\n", rar);
2107 if (!RAR_SUCCESS(rar))
/* Hardware supports at most 7 buffers per release command */
2110 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2112 /* Start the release command */
2113 p = qbman_cena_write_start_wo_shadow(&s->sys,
2114 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
2116 /* Copy the caller's buffer pointers to the command */
2117 u64_to_le32_copy(&p[2], buffers, num_buffers);
2119 /* Set the verb byte, have to substitute in the valid-bit and the
2120 * number of buffers.
2122 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2124 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
2125 RAR_IDX(rar) * 4, QMAN_RT_MODE);
/* Public buffer-release entry point: dispatches to the portal-mode variant. */
2130 inline int qbman_swp_release(struct qbman_swp *s,
2131 const struct qbman_release_desc *d,
2132 const uint64_t *buffers,
2133 unsigned int num_buffers)
2135 return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2138 /*******************/
2139 /* Buffer acquires */
2140 /*******************/
2141 struct qbman_acquire_desc {
2146 uint8_t reserved2[59];
2149 struct qbman_acquire_rslt {
2154 uint8_t reserved2[3];
/*
 * Acquire 1..7 buffers from buffer pool 'bpid' via a management command,
 * copying the acquired buffer addresses into the caller's array. The
 * hardware may return fewer buffers than requested.
 */
2158 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2159 unsigned int num_buffers)
2161 struct qbman_acquire_desc *p;
2162 struct qbman_acquire_rslt *r;
/* Hardware limit: 1..7 buffers per acquire command */
2164 if (!num_buffers || (num_buffers > 7))
2167 /* Start the management command */
2168 p = qbman_swp_mc_start(s);
2173 /* Encode the caller-provided attributes */
2175 p->num = num_buffers;
2177 /* Complete the management command */
2178 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2180 pr_err("qbman: acquire from BPID %d failed, no response\n",
2185 /* Decode the outcome */
2186 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2188 /* Determine success or failure */
2189 if (r->rslt != QBMAN_MC_RSLT_OK) {
2190 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2195 QBMAN_BUG_ON(r->num > num_buffers);
2197 /* Copy the acquired buffers to the caller's array */
2198 u64_from_le32_copy(buffers, &r->buf[0], r->num);
2206 struct qbman_alt_fq_state_desc {
2208 uint8_t reserved[3];
2210 uint8_t reserved2[56];
2213 struct qbman_alt_fq_state_rslt {
2216 uint8_t reserved[62];
2219 #define ALT_FQ_FQID_MASK 0x00FFFFFF
/*
 * Common helper for the FQ alteration commands (schedule/force/XON/XOFF):
 * issues the given management verb against 'fqid' and decodes the result.
 */
2221 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2222 uint8_t alt_fq_verb)
2224 struct qbman_alt_fq_state_desc *p;
2225 struct qbman_alt_fq_state_rslt *r;
2227 /* Start the management command */
2228 p = qbman_swp_mc_start(s);
2232 p->fqid = fqid & ALT_FQ_FQID_MASK;
2234 /* Complete the management command */
2235 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2237 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2242 /* Decode the outcome */
2243 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2245 /* Determine success or failure */
2246 if (r->rslt != QBMAN_MC_RSLT_OK) {
2247 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2248 fqid, alt_fq_verb, r->rslt);
/* Thin wrappers mapping the public FQ operations to their mgmt verbs. */
2255 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
2257 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
2260 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
2262 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
2265 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
2267 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
2270 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
2272 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
2275 /**********************/
2276 /* Channel management */
2277 /**********************/
2279 struct qbman_cdan_ctrl_desc {
2287 uint8_t reserved3[48];
2291 struct qbman_cdan_ctrl_rslt {
2295 uint8_t reserved[60];
2298 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
2299 * would be irresponsible to expose it.
2301 #define CODE_CDAN_WE_EN 0x1
2302 #define CODE_CDAN_WE_CTX 0x4
/*
 * Common helper for channel-dequeue-availability-notification (CDAN)
 * configuration: 'we_mask' selects which fields (enable and/or context)
 * the WQCHAN_CONFIGURE command updates for 'channelid'.
 */
2304 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2305 uint8_t we_mask, uint8_t cdan_en,
2308 struct qbman_cdan_ctrl_desc *p;
2309 struct qbman_cdan_ctrl_rslt *r;
2311 /* Start the management command */
2312 p = qbman_swp_mc_start(s);
2316 /* Encode the caller-provided attributes */
2325 /* Complete the management command */
2326 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2328 pr_err("qbman: wqchan config failed, no response\n");
2332 /* Decode the outcome */
2333 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2334 != QBMAN_WQCHAN_CONFIGURE);
2336 /* Determine success or failure */
2337 if (r->rslt != QBMAN_MC_RSLT_OK) {
2338 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2339 channelid, r->rslt);
/* Public CDAN wrappers: each selects the write-enable mask for the
 * underlying qbman_swp_CDAN_set() command.
 */
2346 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2349 return qbman_swp_CDAN_set(s, channelid,
2354 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2356 return qbman_swp_CDAN_set(s, channelid,
2361 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2363 return qbman_swp_CDAN_set(s, channelid,
2368 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2371 return qbman_swp_CDAN_set(s, channelid,
2372 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
/* Derive a DQRR ring index from an entry's address (see QBMAN_IDX_FROM_DQRR). */
2376 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
2378 return QBMAN_IDX_FROM_DQRR(dqrr);
2381 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2383 struct qbman_result *dq;
2385 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));