1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2018-2019 Solarflare Communications Inc.
10 #if EFSYS_OPT_RIVERHEAD
13 * Non-interrupting event queue requires interrupting event queue to
14 * refer to for wake-up events even if wake ups are never used.
15 * It could be even non-allocated event queue.
17 #define EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
/*
 * NOTE(review): index 0 is used below both as the IRQ number and as the
 * event queue that is forced to NOTIFY_INTERRUPT mode in rhead_ev_qcreate()
 * so that non-interrupting queues always have a wake-up EvQ to reference.
 */
/*
 * Forward declaration of the MCDI event handler that queue create installs
 * in the EvQ handler table (see the eep->ee_mcdi assignment below).
 * NOTE(review): the declarator line carrying the function name and the
 * remaining parameters are elided in this excerpt — verify against the
 * full source.
 */
20 static __checkReturn boolean_t
23 __in efx_qword_t *eqp,
24 __in const efx_ev_callbacks_t *eecp,
28 __checkReturn efx_rc_t
/*
 * Create an EF100 (Riverhead) event queue: set up the per-event-type
 * handler table and initialise the hardware queue via MCDI INIT_EVQ.
 * Only the MCDI handler is wired up so far; the RX/TX/driver/driver-gen
 * handlers are still FIXME placeholders (NULL).
 * NOTE(review): several declarator, brace and return-path lines are
 * elided in this excerpt — verify control flow against the full source.
 */
44 __checkReturn efx_rc_t
47 __in unsigned int index,
48 __in efsys_mem_t *esmp,
58 _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
60 /* Set up the handler table */
61 eep->ee_rx = NULL; /* FIXME */
62 eep->ee_tx = NULL; /* FIXME */
63 eep->ee_driver = NULL; /* FIXME */
64 eep->ee_drv_gen = NULL; /* FIXME */
65 eep->ee_mcdi = rhead_ev_mcdi;
67 /* Set up the event queue */
68 /* INIT_EVQ expects function-relative vector number */
69 if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
70 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
/*
 * The always-interrupting queue (index 0) is forced to interrupt mode
 * even if the caller asked for a non-interrupting queue, so that other
 * queues have a valid wake-up EvQ to refer to.
 */
72 } else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
74 flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
75 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
77 irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
81 * Interrupts may be raised for events immediately after the queue is
82 * created. See bug58606.
84 rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
92 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy the event queue: tear down the hardware queue via MCDI FINI_EVQ.
 * The MCDI result is deliberately ignored ((void) cast) — there is no
 * meaningful recovery from a failed teardown here.
 */
101 efx_nic_t *enp = eep->ee_enp;
103 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);
105 (void) efx_mcdi_fini_evq(enp, eep->ee_index);
/*
 * Re-arm (prime) the event queue interrupt: publish the current read
 * pointer (count masked to the ring size) to the EVQ_INT_PRIME register
 * together with the queue index.
 */
108 __checkReturn efx_rc_t
111 __in unsigned int count)
113 efx_nic_t *enp = eep->ee_enp;
117 rptr = count & eep->ee_mask;
119 EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
121 /* EVQ_INT_PRIME lives function control window only on Riverhead */
122 EFX_BAR_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword, B_FALSE);
/*
 * Post a software (driver-generated) event — not implemented yet on
 * Riverhead; the debug-build assert makes any caller fail loudly.
 */
132 _NOTE(ARGUNUSED(eep, data))
134 /* Not implemented yet */
135 EFSYS_ASSERT(B_FALSE);
139 * Poll event queue in batches. Size of the batch is equal to cache line
140 * size divided by event size.
142 * Event queue is written by NIC and read by CPU. If CPU starts reading
143 * of events on the cache line, read all remaining events in a tight
144 * loop while event is present.
146 #define EF100_EV_BATCH 8
/*
 * NOTE(review): 8 events of sizeof (efx_qword_t) = 8 bytes each gives a
 * 64-byte batch, i.e. one cache line as the comment above states —
 * confirm the target's cache line size if this is ever tuned.
 */
/*
 * Check if an event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events:
 * the bit is flipped on each wrap of the write index, so a slot holds a
 * valid event when its phase bit matches the phase expected for the
 * reader's current pass over the ring.
 *
 * Both parameters are parenthesized in the expansion (CERT C PRE01-C) so
 * that passing a compound expression as _phase_bit cannot misparse around
 * the == operator.
 */
#define EF100_EV_PRESENT(_qword, _phase_bit) \
	(EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == (_phase_bit))
/*
 * Poll the event queue.
 *
 * Events are read in batches of up to EF100_EV_BATCH.  A slot holds a
 * valid event when its phase bit matches the phase expected for the
 * current pass over the ring (EF100_EV_PRESENT).  Each valid event is
 * dispatched by its ESF_GZ_E_TYPE code; unknown codes are reported to the
 * caller through the eec_exception callback and abort processing.
 * Processed events are not cleared — the phase bit alone indicates
 * presence (see the comment near the bottom).
 * NOTE(review): some declarator, brace and loop-control lines are elided
 * in this excerpt — verify control flow against the full source.
 */
160 __inout unsigned int *countp,
161 __in const efx_ev_callbacks_t *eecp,
164 efx_qword_t ev[EF100_EV_BATCH];
166 unsigned int phase_bit;
172 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
173 EFSYS_ASSERT(countp != NULL);
174 EFSYS_ASSERT(eecp != NULL);
178 /* Read up until the end of the batch period */
179 batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
/*
 * Expected phase for this pass: ee_mask + 1 is the ring size, so this
 * bit of the running count flips each time the read index wraps.
 */
180 phase_bit = (count & (eep->ee_mask + 1)) != 0;
181 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
182 for (total = 0; total < batch; ++total) {
183 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
/* Stop at the first slot whose phase does not match — no more events. */
185 if (!EF100_EV_PRESENT(ev[total], phase_bit))
188 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
189 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
190 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
192 offset += sizeof (efx_qword_t);
195 /* Process the batch of events */
196 for (index = 0; index < total; ++index) {
197 boolean_t should_abort;
200 EFX_EV_QSTAT_INCR(eep, EV_ALL);
202 code = EFX_QWORD_FIELD(ev[index], ESF_GZ_E_TYPE);
204 case ESE_GZ_EF100_EV_MCDI:
205 should_abort = eep->ee_mcdi(eep,
206 &(ev[index]), eecp, arg);
/* Unknown event type: report it to the caller and abort processing. */
209 EFSYS_PROBE3(bad_event,
210 unsigned int, eep->ee_index,
212 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
214 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
216 EFSYS_ASSERT(eecp->eec_exception != NULL);
217 (void) eecp->eec_exception(arg,
218 EFX_EXCEPTION_EV_ERROR, code);
219 should_abort = B_TRUE;
222 /* Ignore subsequent events */
226 * Poison batch to ensure the outer
227 * loop is broken out of.
229 EFSYS_ASSERT(batch <= EF100_EV_BATCH);
230 batch += (EF100_EV_BATCH << 1);
231 EFSYS_ASSERT(total != batch);
237 * There is no necessity to clear processed events since
238 * phase bit which is flipping on each write index wrap
239 * is used for event presence indication.
/* Keep polling while full batches are consumed (batch is poisoned on abort). */
244 } while (total == batch);
/*
 * Set interrupt moderation (microseconds).  Both arguments are unused in
 * the visible body — presumably moderation is unsupported or a no-op on
 * this path; NOTE(review): confirm the return value against the elided
 * lines of the full source.
 */
249 __checkReturn efx_rc_t
252 __in unsigned int us)
254 _NOTE(ARGUNUSED(eep, us))
/*
 * Read-and-clear per-queue event statistics: fold each of the EV_NQSTATS
 * counters into the caller-supplied stat array, then reset the queue's
 * own counter so subsequent updates report deltas only.
 * (Compiled only under EFSYS_OPT_QSTATS — see the #endif below.)
 */
262 rhead_ev_qstats_update(
264 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
268 for (id = 0; id < EV_NQSTATS; id++) {
269 efsys_stat_t *essp = &stat[id];
271 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
272 eep->ee_stat[id] = 0;
275 #endif /* EFSYS_OPT_QSTATS */
/*
 * MCDI event handler for Riverhead EvQs.  The EF100 MCDI event layout
 * matches EF10 (apart from the QDMA phase bit, unused on EF10), so this
 * simply delegates to the EF10 handler and propagates its should-abort
 * result.
 */
277 static __checkReturn boolean_t
280 __in efx_qword_t *eqp,
281 __in const efx_ev_callbacks_t *eecp,
287 * Event format was changed post Riverhead R1 and now
288 * MCDI event layout on EF100 is exactly the same as on EF10
289 * except added QDMA phase bit which is unused on EF10.
291 ret = ef10_ev_mcdi(eep, eqp, eecp, arg);
296 #endif /* EFSYS_OPT_RIVERHEAD */