/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 */
10 #if EFSYS_OPT_RIVERHEAD
13 * Non-interrupting event queue requires interrupting event queue to
14 * refer to for wake-up events even if wake ups are never used.
15 * It could be even non-allocated event queue.
17 #define EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
19 static __checkReturn boolean_t
22 __in efx_qword_t *eqp,
23 __in const efx_ev_callbacks_t *eecp,
26 static __checkReturn boolean_t
27 rhead_ev_tx_completion(
29 __in efx_qword_t *eqp,
30 __in const efx_ev_callbacks_t *eecp,
34 static __checkReturn boolean_t
37 __in efx_qword_t *eqp,
38 __in const efx_ev_callbacks_t *eecp,
42 __checkReturn efx_rc_t
58 __checkReturn efx_rc_t
61 __in unsigned int index,
62 __in efsys_mem_t *esmp,
69 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
74 _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
76 desc_size = encp->enc_ev_desc_size;
77 #if EFSYS_OPT_EV_EXTENDED_WIDTH
78 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
79 desc_size = encp->enc_ev_ew_desc_size;
81 EFSYS_ASSERT(desc_size != 0);
83 if (EFSYS_MEM_SIZE(esmp) < (ndescs * desc_size)) {
84 /* Buffer too small for event queue descriptors */
89 /* Set up the handler table */
90 eep->ee_rx = rhead_ev_rx_packets;
91 eep->ee_tx = rhead_ev_tx_completion;
92 eep->ee_driver = NULL; /* FIXME */
93 eep->ee_drv_gen = NULL; /* FIXME */
94 eep->ee_mcdi = rhead_ev_mcdi;
96 /* Set up the event queue */
97 /* INIT_EVQ expects function-relative vector number */
98 if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
99 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
101 } else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
103 flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
104 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
106 irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
110 * Interrupts may be raised for events immediately after the queue is
111 * created. See bug58606.
113 rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
123 EFSYS_PROBE1(fail1, efx_rc_t, rc);
132 efx_nic_t *enp = eep->ee_enp;
134 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);
136 (void) efx_mcdi_fini_evq(enp, eep->ee_index);
139 __checkReturn efx_rc_t
142 __in unsigned int count)
144 efx_nic_t *enp = eep->ee_enp;
148 rptr = count & eep->ee_mask;
150 EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
152 /* EVQ_INT_PRIME lives function control window only on Riverhead */
153 EFX_BAR_FCW_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword);
163 _NOTE(ARGUNUSED(eep, data))
165 /* Not implemented yet */
166 EFSYS_ASSERT(B_FALSE);
170 * Poll event queue in batches. Size of the batch is equal to cache line
171 * size divided by event size.
173 * Event queue is written by NIC and read by CPU. If CPU starts reading
174 * of events on the cache line, read all remaining events in a tight
175 * loop while event is present.
177 #define EF100_EV_BATCH 8
180 * Check if event is present.
182 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
183 * by flipping the phase bit on each wrap of the write index.
185 #define EF100_EV_PRESENT(_qword, _phase_bit) \
186 (EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)
191 __inout unsigned int *countp,
192 __in const efx_ev_callbacks_t *eecp,
195 efx_qword_t ev[EF100_EV_BATCH];
197 unsigned int phase_bit;
203 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
204 EFSYS_ASSERT(countp != NULL);
205 EFSYS_ASSERT(eecp != NULL);
209 /* Read up until the end of the batch period */
210 batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
211 phase_bit = (count & (eep->ee_mask + 1)) != 0;
212 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
213 for (total = 0; total < batch; ++total) {
214 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
216 if (!EF100_EV_PRESENT(ev[total], phase_bit))
219 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
220 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
221 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
223 offset += sizeof (efx_qword_t);
226 /* Process the batch of events */
227 for (index = 0; index < total; ++index) {
228 boolean_t should_abort;
231 EFX_EV_QSTAT_INCR(eep, EV_ALL);
233 code = EFX_QWORD_FIELD(ev[index], ESF_GZ_E_TYPE);
235 case ESE_GZ_EF100_EV_RX_PKTS:
236 should_abort = eep->ee_rx(eep,
237 &(ev[index]), eecp, arg);
239 case ESE_GZ_EF100_EV_TX_COMPLETION:
240 should_abort = eep->ee_tx(eep,
241 &(ev[index]), eecp, arg);
243 case ESE_GZ_EF100_EV_MCDI:
244 should_abort = eep->ee_mcdi(eep,
245 &(ev[index]), eecp, arg);
248 EFSYS_PROBE3(bad_event,
249 unsigned int, eep->ee_index,
251 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
253 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
255 EFSYS_ASSERT(eecp->eec_exception != NULL);
256 (void) eecp->eec_exception(arg,
257 EFX_EXCEPTION_EV_ERROR, code);
258 should_abort = B_TRUE;
261 /* Ignore subsequent events */
265 * Poison batch to ensure the outer
266 * loop is broken out of.
268 EFSYS_ASSERT(batch <= EF100_EV_BATCH);
269 batch += (EF100_EV_BATCH << 1);
270 EFSYS_ASSERT(total != batch);
276 * There is no necessity to clear processed events since
277 * phase bit which is flipping on each write index wrap
278 * is used for event presence indication.
283 } while (total == batch);
288 __checkReturn efx_rc_t
291 __in unsigned int us)
293 _NOTE(ARGUNUSED(eep, us))
301 rhead_ev_qstats_update(
303 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
307 for (id = 0; id < EV_NQSTATS; id++) {
308 efsys_stat_t *essp = &stat[id];
310 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
311 eep->ee_stat[id] = 0;
314 #endif /* EFSYS_OPT_QSTATS */
316 static __checkReturn boolean_t
319 __in efx_qword_t *eqp,
320 __in const efx_ev_callbacks_t *eecp,
323 efx_nic_t *enp = eep->ee_enp;
325 uint32_t num_packets;
326 boolean_t should_abort;
328 EFX_EV_QSTAT_INCR(eep, EV_RX);
330 /* Discard events after RXQ/TXQ errors, or hardware not available */
331 if (enp->en_reset_flags &
332 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
335 label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_Q_LABEL);
338 * On EF100 the EV_RX event reports the number of received
339 * packets (unlike EF10 which reports a descriptor index).
340 * The client driver is responsible for maintaining the Rx
341 * descriptor index, and computing how many descriptors are
342 * occupied by each received packet (based on the Rx buffer size
343 * and the packet length from the Rx prefix).
345 num_packets = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_NUM_PKT);
348 * The receive event may indicate more than one packet, and so
349 * does not contain the packet length. Read the packet length
350 * from the prefix when handling each packet.
352 EFSYS_ASSERT(eecp->eec_rx_packets != NULL);
353 should_abort = eecp->eec_rx_packets(arg, label, num_packets,
356 return (should_abort);
359 static __checkReturn boolean_t
360 rhead_ev_tx_completion(
362 __in efx_qword_t *eqp,
363 __in const efx_ev_callbacks_t *eecp,
366 efx_nic_t *enp = eep->ee_enp;
369 boolean_t should_abort;
371 EFX_EV_QSTAT_INCR(eep, EV_TX);
373 /* Discard events after RXQ/TXQ errors, or hardware not available */
374 if (enp->en_reset_flags &
375 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
378 label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_Q_LABEL);
381 * On EF100 the EV_TX event reports the number of completed Tx
382 * descriptors (on EF10, the event reports the low bits of the
383 * index of the last completed descriptor).
384 * The client driver completion callback will compute the
385 * descriptor index, so that is not needed here.
387 num_descs = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_NUM_DESC);
389 EFSYS_PROBE2(tx_ndescs, uint32_t, label, unsigned int, num_descs);
391 EFSYS_ASSERT(eecp->eec_tx_ndescs != NULL);
392 should_abort = eecp->eec_tx_ndescs(arg, label, num_descs);
394 return (should_abort);
397 static __checkReturn boolean_t
400 __in efx_qword_t *eqp,
401 __in const efx_ev_callbacks_t *eecp,
407 * Event format was changed post Riverhead R1 and now
408 * MCDI event layout on EF100 is exactly the same as on EF10
409 * except added QDMA phase bit which is unused on EF10.
411 ret = ef10_ev_mcdi(eep, eqp, eecp, arg);
416 #endif /* EFSYS_OPT_RIVERHEAD */