1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2018-2019 Solarflare Communications Inc.
10 #if EFSYS_OPT_RIVERHEAD
13 * Non-interrupting event queue requires interrupting event queue to
14 * refer to for wake-up events even if wake ups are never used.
15 * It could be even non-allocated event queue.
/* Fixed EVQ index used as the wake-up/interrupt reference for all queues. */
17 #define EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
/*
 * Forward declarations of the per-event-type handlers that are installed
 * into the EVQ handler table (ee_rx, ee_tx, ee_mcdi) below, plus the
 * extended-width dispatch/poll helpers.
 * NOTE(review): this listing is truncated — several declaration lines,
 * including most of the function names, are not visible here.
 */
22 __in efx_qword_t *eventp,
23 __in const efx_ev_callbacks_t *eecp,
26 static __checkReturn boolean_t
29 __in efx_qword_t *eqp,
30 __in const efx_ev_callbacks_t *eecp,
33 static __checkReturn boolean_t
34 rhead_ev_tx_completion(
36 __in efx_qword_t *eqp,
37 __in const efx_ev_callbacks_t *eecp,
40 static __checkReturn boolean_t
43 __in efx_qword_t *eqp,
44 __in const efx_ev_callbacks_t *eecp,
47 #if EFSYS_OPT_EV_EXTENDED_WIDTH
51 __in efx_xword_t *eventp,
52 __in const efx_ev_callbacks_t *eecp,
58 __inout unsigned int *countp,
59 __in const efx_ev_callbacks_t *eecp,
61 #endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */
/*
 * NOTE(review): return type of a definition whose name and body are
 * missing from this truncated listing (presumably rhead_ev_init — confirm
 * against the full source).
 */
64 __checkReturn efx_rc_t
/*
 * Create an event queue: check that the DMA buffer (esmp) is large enough
 * for ndescs descriptors of the configured size, install the handler
 * table, normalise the notify flags / IRQ selection, and issue MCDI
 * INIT_EVQ.
 * NOTE(review): the function-name line and several statements are missing
 * from this truncated listing; the annotations below cover only what is
 * visible.
 */
80 __checkReturn efx_rc_t
83 __in unsigned int index,
84 __in efsys_mem_t *esmp,
91 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
96 _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
/* Descriptor size depends on queue width (normal vs extended). */
98 desc_size = encp->enc_ev_desc_size;
99 #if EFSYS_OPT_EV_EXTENDED_WIDTH
100 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
101 desc_size = encp->enc_ev_ew_desc_size;
103 EFSYS_ASSERT(desc_size != 0);
105 if (EFSYS_MEM_SIZE(esmp) < (ndescs * desc_size)) {
106 /* Buffer too small for event queue descriptors */
111 /* Set up the handler table */
112 eep->ee_rx = rhead_ev_rx_packets;
113 eep->ee_tx = rhead_ev_tx_completion;
114 eep->ee_driver = NULL; /* FIXME */
115 eep->ee_drv_gen = NULL; /* FIXME */
116 eep->ee_mcdi = rhead_ev_mcdi;
118 /* Set up the event queue */
119 /* INIT_EVQ expects function-relative vector number */
120 if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
121 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
/*
 * The always-interrupting queue is forced to interrupt mode so that
 * non-interrupting queues have a valid wake-up reference (see the
 * EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX comment at the top of the file).
 */
123 } else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
125 flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
126 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
128 irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
132 * Interrupts may be raised for events immediately after the queue is
133 * created. See bug58606.
135 rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
145 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy the event queue (presumably rhead_ev_qdestroy — the name line
 * is not visible in this listing). The MCDI FINI_EVQ status is
 * deliberately discarded: queue destruction is best-effort.
 */
154 efx_nic_t *enp = eep->ee_enp;
156 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);
158 (void) efx_mcdi_fini_evq(enp, eep->ee_index);
/*
 * Re-arm (prime) the event queue (presumably rhead_ev_qprime — name line
 * not visible): write the queue id and the masked read pointer to the
 * EVQ_INT_PRIME register so the NIC may raise the next wake-up.
 */
161 __checkReturn efx_rc_t
164 __in unsigned int count)
166 efx_nic_t *enp = eep->ee_enp;
/* Read pointer wraps at queue size; ee_mask is assumed to be size - 1. */
170 rptr = count & eep->ee_mask;
172 EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
174 /* EVQ_INT_PRIME lives function control window only on Riverhead */
175 EFX_BAR_FCW_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword);
/*
 * Post a driver-generated event (presumably rhead_ev_qpost — name line
 * not visible). Not implemented on Riverhead: asserts in debug builds,
 * both arguments unused.
 */
185 _NOTE(ARGUNUSED(eep, data))
187 /* Not implemented yet */
188 EFSYS_ASSERT(B_FALSE);
/*
 * Dispatch one normal-width (qword) event to the handler registered for
 * its ESF_GZ_E_TYPE code (ee_rx / ee_tx / ee_mcdi). Unknown event types
 * are reported via the exception callback and abort the poll loop.
 * Returns B_TRUE if polling should stop.
 * NOTE(review): switch/break lines are missing from this truncated
 * listing; the visible cases imply the usual switch-on-type shape.
 */
191 static __checkReturn boolean_t
194 __in efx_qword_t *eventp,
195 __in const efx_ev_callbacks_t *eecp,
198 boolean_t should_abort;
201 code = EFX_QWORD_FIELD(*eventp, ESF_GZ_E_TYPE);
203 case ESE_GZ_EF100_EV_RX_PKTS:
204 should_abort = eep->ee_rx(eep, eventp, eecp, arg);
206 case ESE_GZ_EF100_EV_TX_COMPLETION:
207 should_abort = eep->ee_tx(eep, eventp, eecp, arg);
209 case ESE_GZ_EF100_EV_MCDI:
210 should_abort = eep->ee_mcdi(eep, eventp, eecp, arg);
/* Unknown event type: trace it, raise an exception, and stop polling. */
213 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
214 uint32_t, EFX_QWORD_FIELD(*eventp, EFX_DWORD_1),
215 uint32_t, EFX_QWORD_FIELD(*eventp, EFX_DWORD_0));
217 EFSYS_ASSERT(eecp->eec_exception != NULL);
218 (void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
219 should_abort = B_TRUE;
223 return (should_abort);
227 * Poll event queue in batches. Size of the batch is equal to cache line
228 * size divided by event size.
230 * Event queue is written by NIC and read by CPU. If CPU starts reading
231 * of events on the cache line, read all remaining events in a tight
232 * loop while event is present.
/* 8 qword (8-byte) events per batch — assumes 64-byte cache lines; TODO confirm. */
234 #define EF100_EV_BATCH 8
237 * Check if event is present.
239 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
240 * by flipping the phase bit on each wrap of the write index.
242 #define EF100_EV_PRESENT(_qword, _phase_bit) \
243 (EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)
/*
 * Poll the event queue (presumably rhead_ev_qpoll — the name line is not
 * visible): read events in cache-line-sized batches, using the phase bit
 * (flipped on each write-index wrap) to detect valid events, and dispatch
 * each one. Extended-width queues are delegated to rhead_ev_ew_qpoll().
 * *countp is the running event count in/out.
 * NOTE(review): loop/brace/return lines are missing from this truncated
 * listing; annotations cover only the visible statements.
 */
248 __inout unsigned int *countp,
249 __in const efx_ev_callbacks_t *eecp,
252 efx_qword_t ev[EF100_EV_BATCH];
254 unsigned int phase_bit;
260 #if EFSYS_OPT_EV_EXTENDED_WIDTH
261 if (eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) {
262 rhead_ev_ew_qpoll(eep, countp, eecp, arg);
265 #endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */
267 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
268 EFSYS_ASSERT(countp != NULL);
269 EFSYS_ASSERT(eecp != NULL);
273 /* Read up until the end of the batch period */
274 batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
/* Phase bit is the queue-size bit of the count; toggles on each wrap. */
275 phase_bit = (count & (eep->ee_mask + 1)) != 0;
276 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
277 for (total = 0; total < batch; ++total) {
278 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
/* Stop at the first not-yet-written event (phase mismatch). */
280 if (!EF100_EV_PRESENT(ev[total], phase_bit))
283 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
284 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
285 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
287 offset += sizeof (efx_qword_t);
290 /* Process the batch of events */
291 for (index = 0; index < total; ++index) {
292 boolean_t should_abort;
294 EFX_EV_QSTAT_INCR(eep, EV_ALL);
297 rhead_ev_dispatch(eep, &(ev[index]), eecp, arg);
300 /* Ignore subsequent events */
304 * Poison batch to ensure the outer
305 * loop is broken out of.
307 EFSYS_ASSERT(batch <= EF100_EV_BATCH);
308 batch += (EF100_EV_BATCH << 1);
309 EFSYS_ASSERT(total != batch);
315 * There is no necessity to clear processed events since
316 * phase bit which is flipping on each write index wrap
317 * is used for event presence indication.
322 } while (total == batch);
327 #if EFSYS_OPT_EV_EXTENDED_WIDTH
/*
 * Dispatch one extended-width (xword, 256-bit) event. A 64-bit event
 * encapsulated in an EVEW_64BIT container is forwarded to the normal
 * qword dispatcher; unknown types raise an exception and abort polling.
 * Returns B_TRUE if polling should stop.
 */
329 rhead_ev_ew_dispatch(
331 __in efx_xword_t *eventp,
332 __in const efx_ev_callbacks_t *eecp,
335 boolean_t should_abort;
338 EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
340 code = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_256_EV32_TYPE);
342 case ESE_GZ_EF100_EVEW_64BIT:
343 /* NOTE: ignore phase bit in encapsulated 64bit event. */
345 rhead_ev_dispatch(eep, &eventp->ex_qword[0], eecp, arg);
349 /* Omit currently unused reserved bits from the probe. */
350 EFSYS_PROBE7(ew_bad_event, unsigned int, eep->ee_index,
351 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_7),
352 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_4),
353 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_3),
354 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_2),
355 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_1),
356 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_0));
358 EFSYS_ASSERT(eecp->eec_exception != NULL);
359 (void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
360 should_abort = B_TRUE;
363 return (should_abort);
367 * Poll extended width event queue. Size of the batch is equal to cache line
368 * size divided by event size.
/* 2 xword (32-byte) events per batch — assumes 64-byte cache lines; TODO confirm. */
370 #define EF100_EV_EW_BATCH 2
373 * Check if event is present.
375 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
376 * by flipping the phase bit on each wrap of the write index.
378 #define EF100_EV_EW_PRESENT(_xword, _phase_bit) \
379 (EFX_XWORD_FIELD((_xword), ESF_GZ_EV_256_EV32_PHASE) == (_phase_bit))
/*
 * Poll an extended-width event queue (presumably rhead_ev_ew_qpoll — the
 * name line is not visible). Same batched, phase-bit-checked structure as
 * the normal-width poll loop, but reads xwords and dispatches through
 * rhead_ev_ew_dispatch().
 * NOTE(review): loop/brace/return lines are missing from this truncated
 * listing; annotations cover only the visible statements.
 */
384 __inout unsigned int *countp,
385 __in const efx_ev_callbacks_t *eecp,
388 efx_xword_t ev[EF100_EV_EW_BATCH];
390 unsigned int phase_bit;
396 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
397 EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
398 EFSYS_ASSERT(countp != NULL);
399 EFSYS_ASSERT(eecp != NULL);
403 /* Read up until the end of the batch period */
404 batch = EF100_EV_EW_BATCH - (count & (EF100_EV_EW_BATCH - 1));
/* Phase bit is the queue-size bit of the count; toggles on each wrap. */
405 phase_bit = (count & (eep->ee_mask + 1)) != 0;
406 offset = (count & eep->ee_mask) * sizeof (efx_xword_t);
407 for (total = 0; total < batch; ++total) {
408 EFSYS_MEM_READX(eep->ee_esmp, offset, &(ev[total]));
/* Stop at the first not-yet-written event (phase mismatch). */
410 if (!EF100_EV_EW_PRESENT(ev[total], phase_bit))
413 /* Omit unused reserved bits from the probe. */
414 EFSYS_PROBE7(ew_event, unsigned int, eep->ee_index,
415 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_7),
416 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_4),
417 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_3),
418 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_2),
419 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_1),
420 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_0));
422 offset += sizeof (efx_xword_t);
425 /* Process the batch of events */
426 for (index = 0; index < total; ++index) {
427 boolean_t should_abort;
429 EFX_EV_QSTAT_INCR(eep, EV_ALL);
432 rhead_ev_ew_dispatch(eep, &(ev[index]), eecp, arg);
435 /* Ignore subsequent events */
439 * Poison batch to ensure the outer
440 * loop is broken out of.
442 EFSYS_ASSERT(batch <= EF100_EV_EW_BATCH);
443 batch += (EF100_EV_EW_BATCH << 1);
444 EFSYS_ASSERT(total != batch);
450 * There is no necessity to clear processed events since
451 * phase bit which is flipping on each write index wrap
452 * is used for event presence indication.
457 } while (total == batch);
461 #endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */
/*
 * Set event moderation (presumably rhead_ev_qmoderate — name line not
 * visible). Both arguments are unused in the visible body; the return
 * value and any remaining statements are missing from this listing —
 * TODO confirm behavior against the full source.
 */
464 __checkReturn efx_rc_t
467 __in unsigned int us)
469 _NOTE(ARGUNUSED(eep, us))
/*
 * Accumulate this queue's per-event-type counters into the caller's stat
 * array and reset the queue-local counters to zero.
 */
477 rhead_ev_qstats_update(
479 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
483 for (id = 0; id < EV_NQSTATS; id++) {
484 efsys_stat_t *essp = &stat[id];
486 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
487 eep->ee_stat[id] = 0;
490 #endif /* EFSYS_OPT_QSTATS */
/*
 * Handle an EV_RX event (presumably rhead_ev_rx_packets — name line not
 * visible; it is the handler installed as ee_rx). Extracts the Rx queue
 * label and the number of newly received packets and forwards them to the
 * client's eec_rx_packets callback. Events arriving after a queue error
 * or while hardware is unavailable are discarded.
 */
492 static __checkReturn boolean_t
495 __in efx_qword_t *eqp,
496 __in const efx_ev_callbacks_t *eecp,
499 efx_nic_t *enp = eep->ee_enp;
501 uint32_t num_packets;
502 boolean_t should_abort;
504 EFX_EV_QSTAT_INCR(eep, EV_RX);
506 /* Discard events after RXQ/TXQ errors, or hardware not available */
507 if (enp->en_reset_flags &
508 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
511 label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_Q_LABEL);
514 * On EF100 the EV_RX event reports the number of received
515 * packets (unlike EF10 which reports a descriptor index).
516 * The client driver is responsible for maintaining the Rx
517 * descriptor index, and computing how many descriptors are
518 * occupied by each received packet (based on the Rx buffer size
519 * and the packet length from the Rx prefix).
521 num_packets = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_NUM_PKT);
524 * The receive event may indicate more than one packet, and so
525 * does not contain the packet length. Read the packet length
526 * from the prefix when handling each packet.
528 EFSYS_ASSERT(eecp->eec_rx_packets != NULL);
529 should_abort = eecp->eec_rx_packets(arg, label, num_packets,
532 return (should_abort);
/*
 * Handle an EV_TX completion event (installed as ee_tx). Extracts the Tx
 * queue label and the number of completed descriptors and forwards them
 * to the client's eec_tx_ndescs callback. Events arriving after a queue
 * error or while hardware is unavailable are discarded.
 */
535 static __checkReturn boolean_t
536 rhead_ev_tx_completion(
538 __in efx_qword_t *eqp,
539 __in const efx_ev_callbacks_t *eecp,
542 efx_nic_t *enp = eep->ee_enp;
545 boolean_t should_abort;
547 EFX_EV_QSTAT_INCR(eep, EV_TX);
549 /* Discard events after RXQ/TXQ errors, or hardware not available */
550 if (enp->en_reset_flags &
551 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
554 label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_Q_LABEL);
557 * On EF100 the EV_TX event reports the number of completed Tx
558 * descriptors (on EF10, the event reports the low bits of the
559 * index of the last completed descriptor).
560 * The client driver completion callback will compute the
561 * descriptor index, so that is not needed here.
563 num_descs = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_NUM_DESC);
565 EFSYS_PROBE2(tx_ndescs, uint32_t, label, unsigned int, num_descs);
567 EFSYS_ASSERT(eecp->eec_tx_ndescs != NULL);
568 should_abort = eecp->eec_tx_ndescs(arg, label, num_descs);
570 return (should_abort);
/*
 * Handle an MCDI event (presumably rhead_ev_mcdi — name line not visible;
 * it is the handler installed as ee_mcdi). Delegates to the EF10 MCDI
 * event handler, since the EF100 layout matches EF10 apart from an extra
 * (unused-on-EF10) QDMA phase bit.
 */
573 static __checkReturn boolean_t
576 __in efx_qword_t *eqp,
577 __in const efx_ev_callbacks_t *eecp,
583 * Event format was changed post Riverhead R1 and now
584 * MCDI event layout on EF100 is exactly the same as on EF10
585 * except added QDMA phase bit which is unused on EF10.
587 ret = ef10_ev_mcdi(eep, eqp, eecp, arg);
592 #endif /* EFSYS_OPT_RIVERHEAD */