1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2018-2019 Solarflare Communications Inc.
10 #if EFSYS_OPT_RIVERHEAD
/*
 * NOTE(review): the opening comment delimiter is missing from this
 * extract; the text below documents the macro that follows.
 */
13 * Non-interrupting event queue requires interrupting event queue to
14 * refer to for wake-up events even if wake ups are never used.
15 * It may even be a non-allocated event queue.
/* Index of the EVQ that non-interrupting queues refer to for wake-ups */
17 #define EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
/*
 * Forward declarations of the per-event-type handlers that are installed
 * into the EVQ handler table in the queue-create path below (Rx packets,
 * Tx completion, MCDI), plus the extended-width dispatch/poll helpers
 * compiled under EFSYS_OPT_EV_EXTENDED_WIDTH.
 * NOTE(review): several declaration lines (some function names and
 * leading/trailing parameters) are missing from this extract.
 */
19 static __checkReturn boolean_t
22 __in efx_qword_t *eqp,
23 __in const efx_ev_callbacks_t *eecp,
26 static __checkReturn boolean_t
27 rhead_ev_tx_completion(
29 __in efx_qword_t *eqp,
30 __in const efx_ev_callbacks_t *eecp,
33 static __checkReturn boolean_t
36 __in efx_qword_t *eqp,
37 __in const efx_ev_callbacks_t *eecp,
40 #if EFSYS_OPT_EV_EXTENDED_WIDTH
44 __in efx_xword_t *eventp,
45 __in const efx_ev_callbacks_t *eecp,
51 __inout unsigned int *countp,
52 __in const efx_ev_callbacks_t *eecp,
54 #endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */
57 __checkReturn efx_rc_t
/*
 * Event queue create path (function name line missing from this extract;
 * presumably rhead_ev_qcreate — confirm against the full source).
 * Validates that the caller-supplied DMA buffer can hold the requested
 * number of event descriptors, installs the per-event-type handler table,
 * and asks the MC via efx_mcdi_init_evq() to initialise the queue.
 * Returns an efx_rc_t; failure path records rc via EFSYS_PROBE1.
 */
73 __checkReturn efx_rc_t
76 __in unsigned int index,
77 __in efsys_mem_t *esmp,
84 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
89 _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
/* Descriptor size comes from NIC config; extended-width queues differ */
91 desc_size = encp->enc_ev_desc_size;
92 #if EFSYS_OPT_EV_EXTENDED_WIDTH
93 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
94 desc_size = encp->enc_ev_ew_desc_size;
96 EFSYS_ASSERT(desc_size != 0);
98 if (EFSYS_MEM_SIZE(esmp) < (ndescs * desc_size)) {
99 /* Buffer too small for event queue descriptors */
104 /* Set up the handler table */
105 eep->ee_rx = rhead_ev_rx_packets;
106 eep->ee_tx = rhead_ev_tx_completion;
107 eep->ee_driver = NULL; /* FIXME */
108 eep->ee_drv_gen = NULL; /* FIXME */
109 eep->ee_mcdi = rhead_ev_mcdi;
111 /* Set up the event queue */
112 /* INIT_EVQ expects function-relative vector number */
113 if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
114 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
116 } else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
/*
 * The always-interrupting queue must actually interrupt, so force
 * the notify mode regardless of what the caller requested.
 */
118 flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
119 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
121 irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
125 * Interrupts may be raised for events immediately after the queue is
126 * created. See bug58606.
128 rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
138 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Event queue destroy path (function header missing from this extract):
 * asserts the NIC is Riverhead family, then tells the MC to finalise the
 * EVQ.  The MCDI status is deliberately discarded — teardown is best
 * effort.
 */
147 efx_nic_t *enp = eep->ee_enp;
149 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);
151 (void) efx_mcdi_fini_evq(enp, eep->ee_index);
/*
 * Re-prime the event queue (function name line missing from this
 * extract; presumably rhead_ev_qprime): publish the current read pointer
 * for this EVQ through the EVQ_INT_PRIME register so the NIC may raise
 * further interrupts.
 */
154 __checkReturn efx_rc_t
157 __in unsigned int count)
159 efx_nic_t *enp = eep->ee_enp;
/* Read pointer is the processed-event count wrapped to the queue size */
163 rptr = count & eep->ee_mask;
165 EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
167 /* EVQ_INT_PRIME lives in the function control window only on Riverhead */
168 EFX_BAR_FCW_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword);
/*
 * Software ("driver generated") event post path (function header missing
 * from this extract).  Not implemented for Riverhead yet: arguments are
 * unused and reaching this code trips a debug assertion.
 */
178 _NOTE(ARGUNUSED(eep, data))
180 /* Not implemented yet */
181 EFSYS_ASSERT(B_FALSE);
185 * Poll event queue in batches. Size of the batch is equal to cache line
186 * size divided by event size.
188 * Event queue is written by NIC and read by CPU. If CPU starts reading
189 * events on the cache line, read all remaining events in a tight
190 * loop while an event is present.
/* 8 quad-word (8-byte) events per 64-byte cache line */
192 #define EF100_EV_BATCH 8
195 * Check if event is present.
197 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
198 * by flipping the phase bit on each wrap of the write index.
200 #define EF100_EV_PRESENT(_qword, _phase_bit) \
201 (EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)
/*
 * Poll the event queue (function name line missing from this extract;
 * presumably rhead_ev_qpoll).  Reads events in cache-line sized batches,
 * uses the phase bit (not a cleared-event sentinel) to detect valid
 * entries, and dispatches each event to the handler table installed at
 * queue creation.  *countp is the running processed-event count used to
 * derive the buffer offset and expected phase.  Extended-width queues are
 * delegated to rhead_ev_ew_qpoll().
 */
206 __inout unsigned int *countp,
207 __in const efx_ev_callbacks_t *eecp,
210 efx_qword_t ev[EF100_EV_BATCH];
212 unsigned int phase_bit;
218 #if EFSYS_OPT_EV_EXTENDED_WIDTH
219 if (eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) {
220 rhead_ev_ew_qpoll(eep, countp, eecp, arg);
223 #endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */
225 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
226 EFSYS_ASSERT(countp != NULL);
227 EFSYS_ASSERT(eecp != NULL);
231 /* Read up until the end of the batch period */
232 batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
/* Expected phase flips on each wrap of the write index */
233 phase_bit = (count & (eep->ee_mask + 1)) != 0;
234 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
235 for (total = 0; total < batch; ++total) {
236 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
/* Stop at the first entry whose phase bit does not match */
238 if (!EF100_EV_PRESENT(ev[total], phase_bit))
241 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
242 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
243 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
245 offset += sizeof (efx_qword_t);
248 /* Process the batch of events */
249 for (index = 0; index < total; ++index) {
250 boolean_t should_abort;
253 EFX_EV_QSTAT_INCR(eep, EV_ALL);
/* Dispatch on the event type field to the handler table */
255 code = EFX_QWORD_FIELD(ev[index], ESF_GZ_E_TYPE);
257 case ESE_GZ_EF100_EV_RX_PKTS:
258 should_abort = eep->ee_rx(eep,
259 &(ev[index]), eecp, arg);
261 case ESE_GZ_EF100_EV_TX_COMPLETION:
262 should_abort = eep->ee_tx(eep,
263 &(ev[index]), eecp, arg);
265 case ESE_GZ_EF100_EV_MCDI:
266 should_abort = eep->ee_mcdi(eep,
267 &(ev[index]), eecp, arg);
/* Unknown event type: report an exception to the client and abort */
270 EFSYS_PROBE3(bad_event,
271 unsigned int, eep->ee_index,
273 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
275 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
277 EFSYS_ASSERT(eecp->eec_exception != NULL);
278 (void) eecp->eec_exception(arg,
279 EFX_EXCEPTION_EV_ERROR, code);
280 should_abort = B_TRUE;
283 /* Ignore subsequent events */
287 * Poison batch to ensure the outer
288 * loop is broken out of.
290 EFSYS_ASSERT(batch <= EF100_EV_BATCH);
291 batch += (EF100_EV_BATCH << 1);
292 EFSYS_ASSERT(total != batch);
298 * There is no need to clear processed events since the
299 * phase bit, which flips on each write index wrap,
300 * indicates event presence.
/* A full batch means more events may be pending: poll again */
305 } while (total == batch);
310 #if EFSYS_OPT_EV_EXTENDED_WIDTH
/*
 * Dispatch a single extended-width (256-bit) event.  Only valid on an
 * EVQ created with EFX_EVQ_FLAGS_EXTENDED_WIDTH.  Switch body on the
 * event type is partially missing from this extract; the visible default
 * path reports unknown event types to the client's exception callback and
 * requests poll abort.  Returns B_TRUE if polling should stop.
 */
312 rhead_ev_ew_dispatch(
314 __in efx_xword_t *eventp,
315 __in const efx_ev_callbacks_t *eecp,
318 boolean_t should_abort;
321 EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
323 code = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_256_EV32_TYPE);
326 /* Omit currently unused reserved bits from the probe. */
327 EFSYS_PROBE7(ew_bad_event, unsigned int, eep->ee_index,
328 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_7),
329 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_4),
330 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_3),
331 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_2),
332 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_1),
333 uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_0));
335 EFSYS_ASSERT(eecp->eec_exception != NULL);
336 (void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
337 should_abort = B_TRUE;
340 return (should_abort);
344 * Poll extended width event queue. Size of the batch is equal to cache line
345 * size divided by event size.
/* 2 xword (32-byte) events per 64-byte cache line */
347 #define EF100_EV_EW_BATCH 2
350 * Check if event is present.
352 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
353 * by flipping the phase bit on each wrap of the write index.
355 #define EF100_EV_EW_PRESENT(_xword, _phase_bit) \
356 (EFX_XWORD_FIELD((_xword), ESF_GZ_EV_256_EV32_PHASE) == (_phase_bit))
/*
 * Poll an extended-width (256-bit event) queue (function name line
 * missing from this extract; presumably rhead_ev_ew_qpoll).  Mirrors the
 * normal-width poll loop: batch reads sized to a cache line, phase-bit
 * presence detection, per-event dispatch via rhead_ev_ew_dispatch().
 * *countp is the running processed-event count.
 */
361 __inout unsigned int *countp,
362 __in const efx_ev_callbacks_t *eecp,
365 efx_xword_t ev[EF100_EV_EW_BATCH];
367 unsigned int phase_bit;
373 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
374 EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
375 EFSYS_ASSERT(countp != NULL);
376 EFSYS_ASSERT(eecp != NULL);
380 /* Read up until the end of the batch period */
381 batch = EF100_EV_EW_BATCH - (count & (EF100_EV_EW_BATCH - 1));
/* Expected phase flips on each wrap of the write index */
382 phase_bit = (count & (eep->ee_mask + 1)) != 0;
383 offset = (count & eep->ee_mask) * sizeof (efx_xword_t);
384 for (total = 0; total < batch; ++total) {
385 EFSYS_MEM_READX(eep->ee_esmp, offset, &(ev[total]));
/* Stop at the first entry whose phase bit does not match */
387 if (!EF100_EV_EW_PRESENT(ev[total], phase_bit))
390 /* Omit unused reserved bits from the probe. */
391 EFSYS_PROBE7(ew_event, unsigned int, eep->ee_index,
392 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_7),
393 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_4),
394 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_3),
395 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_2),
396 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_1),
397 uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_0));
399 offset += sizeof (efx_xword_t);
402 /* Process the batch of events */
403 for (index = 0; index < total; ++index) {
404 boolean_t should_abort;
406 EFX_EV_QSTAT_INCR(eep, EV_ALL);
409 rhead_ev_ew_dispatch(eep, &(ev[index]), eecp, arg);
412 /* Ignore subsequent events */
416 * Poison batch to ensure the outer
417 * loop is broken out of.
419 EFSYS_ASSERT(batch <= EF100_EV_EW_BATCH);
420 batch += (EF100_EV_EW_BATCH << 1);
421 EFSYS_ASSERT(total != batch);
427 * There is no need to clear processed events since the
428 * phase bit, which flips on each write index wrap,
429 * indicates event presence.
/* A full batch means more events may be pending: poll again */
434 } while (total == batch);
438 #endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */
/*
 * Event moderation setter (function name line missing from this extract;
 * presumably rhead_ev_qmoderate).  Both arguments are marked unused —
 * moderation appears to be a no-op or handled elsewhere on Riverhead;
 * confirm against the full source.
 */
441 __checkReturn efx_rc_t
444 __in unsigned int us)
446 _NOTE(ARGUNUSED(eep, us))
/*
 * Accumulate this queue's event statistics into the caller's array of
 * EV_NQSTATS counters and reset the per-queue counters to zero.
 * Compiled under EFSYS_OPT_QSTATS (the #if line is outside this block).
 */
454 rhead_ev_qstats_update(
456 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
460 for (id = 0; id < EV_NQSTATS; id++) {
461 efsys_stat_t *essp = &stat[id];
/* Fold the per-queue count into the caller's stat, then clear it */
463 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
464 eep->ee_stat[id] = 0;
467 #endif /* EFSYS_OPT_QSTATS */
/*
 * Handle an EV_RX event (function name line missing from this extract;
 * this is the rhead_ev_rx_packets handler installed as eep->ee_rx).
 * Extracts the queue label and the number of received packets and passes
 * them to the client's eec_rx_packets callback.  Returns B_TRUE if the
 * callback requests the poll loop to abort.
 */
469 static __checkReturn boolean_t
472 __in efx_qword_t *eqp,
473 __in const efx_ev_callbacks_t *eecp,
476 efx_nic_t *enp = eep->ee_enp;
478 uint32_t num_packets;
479 boolean_t should_abort;
481 EFX_EV_QSTAT_INCR(eep, EV_RX);
483 /* Discard events after RXQ/TXQ errors, or hardware not available */
484 if (enp->en_reset_flags &
485 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
488 label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_Q_LABEL);
491 * On EF100 the EV_RX event reports the number of received
492 * packets (unlike EF10 which reports a descriptor index).
493 * The client driver is responsible for maintaining the Rx
494 * descriptor index, and computing how many descriptors are
495 * occupied by each received packet (based on the Rx buffer size
496 * and the packet length from the Rx prefix).
498 num_packets = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_NUM_PKT);
501 * The receive event may indicate more than one packet, and so
502 * does not contain the packet length. Read the packet length
503 * from the prefix when handling each packet.
505 EFSYS_ASSERT(eecp->eec_rx_packets != NULL);
506 should_abort = eecp->eec_rx_packets(arg, label, num_packets,
509 return (should_abort);
/*
 * Handle an EV_TX completion event: extract the Tx queue label and the
 * number of completed descriptors, and pass them to the client's
 * eec_tx_ndescs callback.  Events arriving after a queue error or while
 * the hardware is unavailable are discarded.  Returns B_TRUE if the
 * callback requests the poll loop to abort.
 */
512 static __checkReturn boolean_t
513 rhead_ev_tx_completion(
515 __in efx_qword_t *eqp,
516 __in const efx_ev_callbacks_t *eecp,
519 efx_nic_t *enp = eep->ee_enp;
522 boolean_t should_abort;
524 EFX_EV_QSTAT_INCR(eep, EV_TX);
526 /* Discard events after RXQ/TXQ errors, or hardware not available */
527 if (enp->en_reset_flags &
528 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
531 label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_Q_LABEL);
534 * On EF100 the EV_TX event reports the number of completed Tx
535 * descriptors (on EF10, the event reports the low bits of the
536 * index of the last completed descriptor).
537 * The client driver completion callback will compute the
538 * descriptor index, so that is not needed here.
540 num_descs = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_NUM_DESC);
542 EFSYS_PROBE2(tx_ndescs, uint32_t, label, unsigned int, num_descs);
544 EFSYS_ASSERT(eecp->eec_tx_ndescs != NULL);
545 should_abort = eecp->eec_tx_ndescs(arg, label, num_descs);
547 return (should_abort);
/*
 * Handle an MCDI event (function name line missing from this extract;
 * this is the rhead_ev_mcdi handler installed as eep->ee_mcdi).
 * Delegates entirely to the EF10 handler, since the layouts match.
 */
550 static __checkReturn boolean_t
553 __in efx_qword_t *eqp,
554 __in const efx_ev_callbacks_t *eecp,
560 * Event format was changed post Riverhead R1 and now
561 * MCDI event layout on EF100 is exactly the same as on EF10
562 * except added QDMA phase bit which is unused on EF10.
564 ret = ef10_ev_mcdi(eep, eqp, eecp, arg);
569 #endif /* EFSYS_OPT_RIVERHEAD */