1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2007-2019 Solarflare Communications Inc.
/*
 * Predicate: does this event-queue entry hold a real event?
 * Processed/empty slots are filled with all-ones (see the EFX_SET_QWORD()
 * clearing loop in siena_ef10_ev_qpoll below), so an entry is "present"
 * only if at least one of its two dwords differs from 0xffffffff.
 * NOTE(review): embedded original line numbers in this file are
 * non-contiguous -- interior lines appear elided; code left byte-identical.
 */
13 #define EFX_EV_PRESENT(_qword) \
14 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
15 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
/*
 * Forward declarations of the per-family (Siena / shared Siena+EF10)
 * static implementations referenced by the method tables below.
 * NOTE(review): the declared function names and several parameter lines
 * are elided from this view -- only fragments of the prototypes remain.
 */
21 static __checkReturn efx_rc_t
29 static __checkReturn efx_rc_t
32 __in unsigned int index,
33 __in efsys_mem_t *esmp,
44 static __checkReturn efx_rc_t
47 __in unsigned int count);
54 static __checkReturn efx_rc_t
57 __in unsigned int us);
61 siena_ev_qstats_update(
63 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
67 #endif /* EFSYS_OPT_SIENA */
69 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA
74 __inout unsigned int *countp,
75 __in const efx_ev_callbacks_t *eecp,
78 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */
/*
 * Siena method table: binds the Siena-specific implementations to the
 * generic eevo_* slots of efx_ev_ops_t. Note eevo_qpoll is the
 * siena_ef10_ev_qpoll routine shared with the EF10 table below.
 * NOTE(review): closing brace of the initializer is elided in this view.
 */
81 static const efx_ev_ops_t __efx_ev_siena_ops = {
82 siena_ev_init, /* eevo_init */
83 siena_ev_fini, /* eevo_fini */
84 siena_ev_qcreate, /* eevo_qcreate */
85 siena_ev_qdestroy, /* eevo_qdestroy */
86 siena_ev_qprime, /* eevo_qprime */
87 siena_ev_qpost, /* eevo_qpost */
88 siena_ef10_ev_qpoll, /* eevo_qpoll */
89 siena_ev_qmoderate, /* eevo_qmoderate */
91 siena_ev_qstats_update, /* eevo_qstats_update */
94 #endif /* EFSYS_OPT_SIENA */
/*
 * EF10 (Huntington/Medford/Medford2) method table. Everything is
 * ef10_ev_* except eevo_qpoll, which reuses the Siena/EF10 common
 * poll loop (event code layouts match -- see the static asserts in
 * siena_ef10_ev_qpoll).
 * NOTE(review): closing brace of the initializer is elided in this view.
 */
97 static const efx_ev_ops_t __efx_ev_ef10_ops = {
98 ef10_ev_init, /* eevo_init */
99 ef10_ev_fini, /* eevo_fini */
100 ef10_ev_qcreate, /* eevo_qcreate */
101 ef10_ev_qdestroy, /* eevo_qdestroy */
102 ef10_ev_qprime, /* eevo_qprime */
103 ef10_ev_qpost, /* eevo_qpost */
104 siena_ef10_ev_qpoll, /* eevo_qpoll */
105 ef10_ev_qmoderate, /* eevo_qmoderate */
107 ef10_ev_qstats_update, /* eevo_qstats_update */
110 #endif /* EFX_OPTS_EF10() */
112 #if EFSYS_OPT_RIVERHEAD
/*
 * Riverhead method table: fully self-contained rhead_ev_* set,
 * including its own eevo_qpoll (Riverhead does not share the
 * Siena/EF10 event format).
 * NOTE(review): closing brace of the initializer is elided in this view.
 */
113 static const efx_ev_ops_t __efx_ev_rhead_ops = {
114 rhead_ev_init, /* eevo_init */
115 rhead_ev_fini, /* eevo_fini */
116 rhead_ev_qcreate, /* eevo_qcreate */
117 rhead_ev_qdestroy, /* eevo_qdestroy */
118 rhead_ev_qprime, /* eevo_qprime */
119 rhead_ev_qpost, /* eevo_qpost */
120 rhead_ev_qpoll, /* eevo_qpoll */
121 rhead_ev_qmoderate, /* eevo_qmoderate */
123 rhead_ev_qstats_update, /* eevo_qstats_update */
126 #endif /* EFSYS_OPT_RIVERHEAD */
/*
 * Generic EV module init: selects the per-family method table from
 * enp->en_family, calls its eevo_init, and latches the table plus the
 * EFX_MOD_EV module flag on success. Requires EFX_MOD_INTR already set.
 * NOTE(review): the function-name line, `break`s after each case, the
 * default case, fail labels and final returns are elided in this view.
 */
129 __checkReturn efx_rc_t
133 const efx_ev_ops_t *eevop;
136 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
137 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
139 if (enp->en_mod_flags & EFX_MOD_EV) {
144 switch (enp->en_family) {
146 case EFX_FAMILY_SIENA:
147 eevop = &__efx_ev_siena_ops;
149 #endif /* EFSYS_OPT_SIENA */
151 #if EFSYS_OPT_HUNTINGTON
152 case EFX_FAMILY_HUNTINGTON:
153 eevop = &__efx_ev_ef10_ops;
155 #endif /* EFSYS_OPT_HUNTINGTON */
157 #if EFSYS_OPT_MEDFORD
158 case EFX_FAMILY_MEDFORD:
159 eevop = &__efx_ev_ef10_ops;
161 #endif /* EFSYS_OPT_MEDFORD */
163 #if EFSYS_OPT_MEDFORD2
164 case EFX_FAMILY_MEDFORD2:
165 eevop = &__efx_ev_ef10_ops;
167 #endif /* EFSYS_OPT_MEDFORD2 */
169 #if EFSYS_OPT_RIVERHEAD
170 case EFX_FAMILY_RIVERHEAD:
171 eevop = &__efx_ev_rhead_ops;
173 #endif /* EFSYS_OPT_RIVERHEAD */
181 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
183 if ((rc = eevop->eevo_init(enp)) != 0)
186 enp->en_eevop = eevop;
187 enp->en_mod_flags |= EFX_MOD_EV;
194 EFSYS_PROBE1(fail1, efx_rc_t, rc);
196 enp->en_eevop = NULL;
197 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * EVQ sizing helpers. The first routine computes the queue byte size as
 * ndescs * descriptor size, using the extended-width descriptor size when
 * EFX_EVQ_FLAGS_EXTENDED_WIDTH is requested (asserted unsupported
 * otherwise). The second converts that size to a buffer count by rounding
 * up to EFX_BUF_SIZE units.
 * NOTE(review): both function-name lines and some declarations are elided.
 */
203 __in const efx_nic_t *enp,
204 __in unsigned int ndescs,
207 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
210 desc_size = encp->enc_ev_desc_size;
212 #if EFSYS_OPT_EV_EXTENDED_WIDTH
213 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
214 desc_size = encp->enc_ev_ew_desc_size;
216 EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);
219 return (ndescs * desc_size);
222 __checkReturn unsigned int
224 __in const efx_nic_t *enp,
225 __in unsigned int ndescs,
230 size = efx_evq_size(enp, ndescs, flags);
232 return (EFX_DIV_ROUND_UP(size, EFX_BUF_SIZE));
/*
 * Generic EV module teardown: asserts all queues destroyed and RX/TX
 * modules already down, calls the family eevo_fini, then clears the
 * method-table pointer and the EFX_MOD_EV flag.
 * NOTE(review): signature line is elided in this view.
 */
239 const efx_ev_ops_t *eevop = enp->en_eevop;
241 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
242 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
243 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
244 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
245 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
246 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
248 eevop->eevo_fini(enp);
250 enp->en_eevop = NULL;
251 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * Generic EVQ creation. Validates index / moderation / notify-mode /
 * extended-width / descriptor-count / buffer-size parameters against the
 * NIC config, allocates the efx_evq_t, publishes it through *eepp BEFORE
 * the hardware queue is created (see the bug58606 comment below), then
 * dispatches to the family eevo_qcreate. Failure paths free the object.
 * NOTE(review): the function-name line, several parameter lines, the
 * power-of-two ndescs check, fail labels and returns are elided here.
 */
255 __checkReturn efx_rc_t
258 __in unsigned int index,
259 __in efsys_mem_t *esmp,
264 __deref_out efx_evq_t **eepp)
266 const efx_ev_ops_t *eevop = enp->en_eevop;
268 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
271 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
272 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
274 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
275 enp->en_nic_cfg.enc_evq_limit);
277 if (index >= encp->enc_evq_limit) {
282 if (us > encp->enc_evq_timer_max_us) {
287 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
288 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
290 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
301 if ((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) &&
302 (encp->enc_ev_ew_desc_size == 0)) {
303 /* Extended width event descriptors are not supported. */
308 EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs));
309 EFSYS_ASSERT(ISP2(encp->enc_evq_min_nevs));
312 ndescs < encp->enc_evq_min_nevs ||
313 ndescs > encp->enc_evq_max_nevs) {
318 if (EFSYS_MEM_SIZE(esmp) < (ndescs * encp->enc_ev_desc_size)) {
319 /* Buffer too small for event queue descriptors. */
324 /* Allocate an EVQ object */
325 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
331 eep->ee_magic = EFX_EVQ_MAGIC;
333 eep->ee_index = index;
334 eep->ee_mask = ndescs - 1;
335 eep->ee_flags = flags;
339 * Set outputs before the queue is created because interrupts may be
340 * raised for events immediately after the queue is created, before the
341 * function call below returns. See bug58606.
343 * The eepp pointer passed in by the client must therefore point to data
344 * shared with the client's event processing context.
349 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
360 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
376 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Generic EVQ destroy: dispatches to the family eevo_qdestroy and frees
 * the efx_evq_t object. Asserts the queue count is non-zero first.
 * NOTE(review): signature and the en_ev_qcount decrement are elided.
 */
384 efx_nic_t *enp = eep->ee_enp;
385 const efx_ev_ops_t *eevop = enp->en_eevop;
387 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
389 EFSYS_ASSERT(enp->en_ev_qcount != 0);
392 eevop->eevo_qdestroy(eep);
394 /* Free the EVQ object */
395 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
/*
 * Re-arm the queue's interrupt/read pointer at `count`. Refuses with an
 * error when the interrupt module is not initialised, otherwise defers
 * to the family eevo_qprime.
 * NOTE(review): function-name line, fail labels and returns are elided.
 */
398 __checkReturn efx_rc_t
401 __in unsigned int count)
403 efx_nic_t *enp = eep->ee_enp;
404 const efx_ev_ops_t *eevop = enp->en_eevop;
407 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
409 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
414 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
422 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_ev_qpresent: read the qword at ring position (count & ee_mask) and
 * report whether it holds a live event (EFX_EV_PRESENT).
 * efx_ev_qprefetch (EFSYS_OPT_EV_PREFETCH): issue a cache prefetch for
 * the same ring position.
 * NOTE(review): both function-name lines and local declarations elided.
 */
426 __checkReturn boolean_t
429 __in unsigned int count)
434 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
436 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
437 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
439 return (EFX_EV_PRESENT(qword));
442 #if EFSYS_OPT_EV_PREFETCH
447 __in unsigned int count)
451 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
453 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
454 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
457 #endif /* EFSYS_OPT_EV_PREFETCH */
/*
 * (original header comment retained below)
 * Synthesise the eec_initialized callback on boards whose firmware does
 * not deliver an INIT_DONE event (enc_evq_init_done_ev_supported ==
 * B_FALSE); a no-op elsewhere. Must be called under the same locks as
 * efx_ev_qpoll() so the callback runs in the usual locking context.
 */
460 * This method is needed to ensure that eec_initialized callback
461 * is invoked after queue creation. The callback will be invoked
462 * on Riverhead boards which have no support for INIT_DONE events
463 * and will do nothing on other boards.
465 * The client drivers must call this method after calling efx_ev_create().
466 * The call must be done with the same locks being held (if any) which are
467 * normally acquired around efx_ev_qpoll() calls to ensure that
468 * eec_initialized callback is invoked within the same locking context.
471 efx_ev_qcreate_check_init_done(
473 __in const efx_ev_callbacks_t *eecp,
476 const efx_nic_cfg_t *encp;
478 EFSYS_ASSERT(eep != NULL);
479 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
480 EFSYS_ASSERT(eecp != NULL);
481 EFSYS_ASSERT(eecp->eec_initialized != NULL);
483 encp = efx_nic_cfg_get(eep->ee_enp);
485 if (encp->enc_evq_init_done_ev_supported == B_FALSE)
486 (void) eecp->eec_initialized(arg);
/*
 * efx_ev_qpoll: thin dispatcher to the family eevo_qpoll (asserted
 * non-NULL) -- processes events starting at *countp via the eecp
 * callback set.
 * efx_ev_qpost: thin dispatcher to eevo_qpost -- posts a software event.
 * NOTE(review): both function-name lines and some parameters elided.
 */
492 __inout unsigned int *countp,
493 __in const efx_ev_callbacks_t *eecp,
496 efx_nic_t *enp = eep->ee_enp;
497 const efx_ev_ops_t *eevop = enp->en_eevop;
499 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
501 EFSYS_ASSERT(eevop != NULL &&
502 eevop->eevo_qpoll != NULL);
504 eevop->eevo_qpoll(eep, countp, eecp, arg);
512 efx_nic_t *enp = eep->ee_enp;
513 const efx_ev_ops_t *eevop = enp->en_eevop;
515 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
517 EFSYS_ASSERT(eevop != NULL &&
518 eevop->eevo_qpost != NULL);
520 eevop->eevo_qpost(eep, data);
/*
 * Convert microseconds to hardware timer ticks using the NIC's timer
 * quantum (enc_evq_timer_quantum_ns). Fails if the quantum is zero
 * (unknown); any non-zero `us` below one quantum rounds UP to 1 tick
 * so a requested delay is never silently disabled.
 * NOTE(review): the us == 0 branch and returns are elided in this view.
 */
523 __checkReturn efx_rc_t
524 efx_ev_usecs_to_ticks(
526 __in unsigned int us,
527 __out unsigned int *ticksp)
529 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
533 if (encp->enc_evq_timer_quantum_ns == 0) {
538 /* Convert microseconds to a timer tick count */
541 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
542 ticks = 1; /* Never round down to zero */
544 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
550 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Set interrupt moderation to `us` microseconds. Rejected when the queue
 * was created with EFX_EVQ_FLAGS_NOTIFY_DISABLED (moderating a queue that
 * never notifies is meaningless); otherwise dispatches to eevo_qmoderate.
 * NOTE(review): function-name line, fail labels and returns are elided.
 */
554 __checkReturn efx_rc_t
557 __in unsigned int us)
559 efx_nic_t *enp = eep->ee_enp;
560 const efx_ev_ops_t *eevop = enp->en_eevop;
563 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
565 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
566 EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
571 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
579 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * (EFSYS_OPT_QSTATS) Accumulate and reset this queue's event statistics
 * into the caller's EV_NQSTATS-sized array via eevo_qstats_update.
 */
585 efx_ev_qstats_update(
587 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
589 { efx_nic_t *enp = eep->ee_enp;
590 const efx_ev_ops_t *eevop = enp->en_eevop;
592 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
594 eevop->eevo_qstats_update(eep, stat);
597 #endif /* EFSYS_OPT_QSTATS */
/*
 * Siena EV init: read-modify-write FR_AZ_DP_CTRL_REG to clear
 * FRF_AZ_FLS_EVQ_ID (the flush event-queue id used for RX/TX queue
 * flush notifications).
 * NOTE(review): function name, locals and return are elided in this view.
 */
601 static __checkReturn efx_rc_t
608 * Program the event queue for receive and transmit queue
611 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
612 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
613 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
/*
 * Decode the error bits of an RX completion whose PKT_OK bit is clear.
 * Updates *flagsp (EFX_ADDR_MISMATCH / EFX_DISCARD / checksum flags) and
 * bumps the matching EV_RX_* qstats. Returns `ignore` = B_TRUE for events
 * that should be dropped outright (e.g. the scatter payload-queue-ran-dry
 * case under EFSYS_OPT_RX_SCATTER).
 * NOTE(review): function name and the final return are elided here.
 */
619 static __checkReturn boolean_t
622 __in efx_qword_t *eqp,
625 __inout uint16_t *flagsp)
627 boolean_t ignore = B_FALSE;
629 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
630 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
631 EFSYS_PROBE(tobe_disc);
633 * Assume this is a unicast address mismatch, unless below
634 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
635 * EV_RX_PAUSE_FRM_ERR is set.
637 (*flagsp) |= EFX_ADDR_MISMATCH;
640 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
641 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
642 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
643 (*flagsp) |= EFX_DISCARD;
645 #if EFSYS_OPT_RX_SCATTER
647 * Lookout for payload queue ran dry errors and ignore them.
649 * Sadly for the header/data split cases, the descriptor
650 * pointer in this event refers to the header queue and
651 * therefore cannot be easily detected as duplicate.
652 * So we drop these and rely on the receive processing seeing
653 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
654 * the partially received packet.
656 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
657 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
658 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
660 #endif /* EFSYS_OPT_RX_SCATTER */
663 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
664 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
665 EFSYS_PROBE(crc_err);
666 (*flagsp) &= ~EFX_ADDR_MISMATCH;
667 (*flagsp) |= EFX_DISCARD;
670 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
671 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
672 EFSYS_PROBE(pause_frm_err);
673 (*flagsp) &= ~EFX_ADDR_MISMATCH;
674 (*flagsp) |= EFX_DISCARD;
677 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
678 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
679 EFSYS_PROBE(owner_id_err);
680 (*flagsp) |= EFX_DISCARD;
683 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
684 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
685 EFSYS_PROBE(ipv4_err);
686 (*flagsp) &= ~EFX_CKSUM_IPV4;
689 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
690 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
691 EFSYS_PROBE(udp_chk_err);
692 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
695 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
696 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
699 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
700 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
703 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
/*
 * Decode a Siena RX completion event: extract descriptor id, byte count,
 * queue label and header type; derive packet-type/checksum flags from
 * FSF_AZ_RX_EV_HDR_TYPE and the IPv6 bit; fold in scatter (SOP /
 * JUMBO_CONT), error (via siena_ev_rx_not_ok), multicast-mismatch and
 * VLAN indications; finally deliver through eecp->eec_rx and return its
 * abort verdict.
 * NOTE(review): function name, local declarations, switch header, break
 * statements and the `ok`/parse-error branch structure are elided, so
 * the exact control flow between the retained lines cannot be confirmed
 * from this view.
 */
709 static __checkReturn boolean_t
712 __in efx_qword_t *eqp,
713 __in const efx_ev_callbacks_t *eecp,
720 #if EFSYS_OPT_RX_SCATTER
722 boolean_t jumbo_cont;
723 #endif /* EFSYS_OPT_RX_SCATTER */
728 boolean_t should_abort;
730 EFX_EV_QSTAT_INCR(eep, EV_RX);
732 /* Basic packet information */
733 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
734 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
735 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
736 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
738 #if EFSYS_OPT_RX_SCATTER
739 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
740 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
741 #endif /* EFSYS_OPT_RX_SCATTER */
743 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
745 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
748 * If packet is marked as OK and packet type is TCP/IP or
749 * UDP/IP or other IP, then we can rely on the hardware checksums.
752 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
753 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
755 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
756 flags |= EFX_PKT_IPV6;
758 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
759 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
763 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
764 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
766 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
767 flags |= EFX_PKT_IPV6;
769 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
770 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
774 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
776 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
777 flags = EFX_PKT_IPV6;
779 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
780 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
784 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
785 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
790 EFSYS_ASSERT(B_FALSE);
795 #if EFSYS_OPT_RX_SCATTER
796 /* Report scatter and header/lookahead split buffer flags */
798 flags |= EFX_PKT_START;
800 flags |= EFX_PKT_CONT;
801 #endif /* EFSYS_OPT_RX_SCATTER */
803 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
805 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
807 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
808 uint32_t, size, uint16_t, flags);
814 /* If we're not discarding the packet then it is ok */
815 if (~flags & EFX_DISCARD)
816 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
818 /* Detect multicast packets that didn't match the filter */
819 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
820 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
822 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
823 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
825 EFSYS_PROBE(mcast_mismatch);
826 flags |= EFX_ADDR_MISMATCH;
829 flags |= EFX_PKT_UNICAST;
833 * The packet parser in Siena can abort parsing packets under
834 * certain error conditions, setting the PKT_NOT_PARSED bit
835 * (which clears PKT_OK). If this is set, then don't trust
836 * the PKT_TYPE field.
841 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
843 flags |= EFX_CHECK_VLAN;
846 if (~flags & EFX_CHECK_VLAN) {
849 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
850 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
851 flags |= EFX_PKT_VLAN_TAGGED;
854 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
855 uint32_t, size, uint16_t, flags);
857 EFSYS_ASSERT(eecp->eec_rx != NULL);
858 should_abort = eecp->eec_rx(arg, label, id, size, flags);
860 return (should_abort);
/*
 * Decode a Siena TX completion event. A clean completion (COMP set, no
 * PKT_ERR / PKT_TOO_BIG / WQ_FF_FULL) is delivered via eecp->eec_tx with
 * the descriptor pointer and queue label. Anything else is counted in
 * the corresponding EV_TX_* qstat (or EV_TX_UNEXPECTED) and probed.
 * NOTE(review): function name, local declarations and the final return
 * are elided in this view.
 */
863 static __checkReturn boolean_t
866 __in efx_qword_t *eqp,
867 __in const efx_ev_callbacks_t *eecp,
872 boolean_t should_abort;
874 EFX_EV_QSTAT_INCR(eep, EV_TX);
876 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
877 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
878 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
879 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
881 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
882 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
884 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
886 EFSYS_ASSERT(eecp->eec_tx != NULL);
887 should_abort = eecp->eec_tx(arg, label, id);
889 return (should_abort);
892 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
893 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
894 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
895 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
897 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
898 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
900 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
901 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
903 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
904 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
906 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
/*
 * Global-event handler stub: ignores the event payload and callbacks
 * (ARGUNUSED), only counting EV_GLOBAL.
 * NOTE(review): function name and return are elided in this view.
 */
910 static __checkReturn boolean_t
913 __in efx_qword_t *eqp,
914 __in const efx_ev_callbacks_t *eecp,
917 _NOTE(ARGUNUSED(eqp, eecp, arg))
919 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
/*
 * Dispatch a Siena driver event by FSF_AZ_DRIVER_EV_SUBCODE: TX/RX flush
 * completions (distinguishing failed RX flushes), EVQ init-done,
 * EVQ-not-enabled, SRAM update, wake-up, timer, and RX/TX descriptor
 * errors (reported as exceptions). Each case bumps its qstat and invokes
 * the matching eecp callback, accumulating `should_abort`.
 * NOTE(review): function name, break statements, some closing braces and
 * the default case are elided in this view.
 */
924 static __checkReturn boolean_t
927 __in efx_qword_t *eqp,
928 __in const efx_ev_callbacks_t *eecp,
931 boolean_t should_abort;
933 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
934 should_abort = B_FALSE;
936 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
937 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
940 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
942 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
944 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
946 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
947 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
951 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
955 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
956 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
958 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
959 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
962 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
964 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
966 should_abort = eecp->eec_rxq_flush_failed(arg,
969 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
971 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
973 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
978 case FSE_AZ_EVQ_INIT_DONE_EV:
979 EFSYS_ASSERT(eecp->eec_initialized != NULL);
980 should_abort = eecp->eec_initialized(arg);
984 case FSE_AZ_EVQ_NOT_EN_EV:
985 EFSYS_PROBE(evq_not_en);
988 case FSE_AZ_SRM_UPD_DONE_EV: {
991 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
993 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
995 EFSYS_ASSERT(eecp->eec_sram != NULL);
996 should_abort = eecp->eec_sram(arg, code);
1000 case FSE_AZ_WAKE_UP_EV: {
1003 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1005 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
1006 should_abort = eecp->eec_wake_up(arg, id);
1010 case FSE_AZ_TX_PKT_NON_TCP_UDP:
1011 EFSYS_PROBE(tx_pkt_non_tcp_udp);
1014 case FSE_AZ_TIMER_EV: {
1017 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1019 EFSYS_ASSERT(eecp->eec_timer != NULL);
1020 should_abort = eecp->eec_timer(arg, id);
1024 case FSE_AZ_RX_DSC_ERROR_EV:
1025 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
1027 EFSYS_PROBE(rx_dsc_error);
1029 EFSYS_ASSERT(eecp->eec_exception != NULL);
1030 should_abort = eecp->eec_exception(arg,
1031 EFX_EXCEPTION_RX_DSC_ERROR, 0);
1035 case FSE_AZ_TX_DSC_ERROR_EV:
1036 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
1038 EFSYS_PROBE(tx_dsc_error);
1040 EFSYS_ASSERT(eecp->eec_exception != NULL);
1041 should_abort = eecp->eec_exception(arg,
1042 EFX_EXCEPTION_TX_DSC_ERROR, 0);
1050 return (should_abort);
/*
 * Handle a driver-generated (software) event: extract the 32-bit payload
 * from FSF_AZ_EV_DATA_DW0, reject values that do not fit in 16 bits
 * (probed as bad_event), otherwise deliver the truncated value through
 * eecp->eec_software.
 * NOTE(review): function name, local declarations and the early-return
 * after the bad_event probe are elided in this view.
 */
1053 static __checkReturn boolean_t
1055 __in efx_evq_t *eep,
1056 __in efx_qword_t *eqp,
1057 __in const efx_ev_callbacks_t *eecp,
1061 boolean_t should_abort;
1063 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1065 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
1066 if (data >= ((uint32_t)1 << 16)) {
1067 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1068 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1069 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1073 EFSYS_ASSERT(eecp->eec_software != NULL);
1074 should_abort = eecp->eec_software(arg, (uint16_t)data);
1076 return (should_abort);
/*
 * (EFSYS_OPT_MCDI) Dispatch a Siena MCDI response event by
 * MCDI_EVENT_CODE: assertion/reboot -> efx_mcdi_ev_death; command
 * completion -> efx_mcdi_ev_cpl; link change -> siena_phy_link_ev +
 * eec_link_change; sensor events -> mcdi_mon_ev / eec_monitor (with
 * ENOTSUP surfaced as an UNKNOWN_SENSOREVT exception and ENODEV --
 * wrong port -- ignored); MAC stats DMA -> eec_mac_stats; firmware
 * alerts -> eec_exception. Only runs on Siena (asserted and checked).
 * NOTE(review): function name, switch header, break statements and the
 * MON_STATS #endif pairing are elided in this view.
 */
1081 static __checkReturn boolean_t
1083 __in efx_evq_t *eep,
1084 __in efx_qword_t *eqp,
1085 __in const efx_ev_callbacks_t *eecp,
1088 efx_nic_t *enp = eep->ee_enp;
1090 boolean_t should_abort = B_FALSE;
1092 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
1094 if (enp->en_family != EFX_FAMILY_SIENA)
1097 EFSYS_ASSERT(eecp->eec_link_change != NULL);
1098 EFSYS_ASSERT(eecp->eec_exception != NULL);
1099 #if EFSYS_OPT_MON_STATS
1100 EFSYS_ASSERT(eecp->eec_monitor != NULL);
1103 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
1105 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
1107 case MCDI_EVENT_CODE_BADSSERT:
1108 efx_mcdi_ev_death(enp, EINTR);
1111 case MCDI_EVENT_CODE_CMDDONE:
1112 efx_mcdi_ev_cpl(enp,
1113 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1114 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1115 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1118 case MCDI_EVENT_CODE_LINKCHANGE: {
1119 efx_link_mode_t link_mode;
1121 siena_phy_link_ev(enp, eqp, &link_mode);
1122 should_abort = eecp->eec_link_change(arg, link_mode);
1125 case MCDI_EVENT_CODE_SENSOREVT: {
1126 #if EFSYS_OPT_MON_STATS
1128 efx_mon_stat_value_t value;
1131 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
1132 should_abort = eecp->eec_monitor(arg, id, value);
1133 else if (rc == ENOTSUP) {
1134 should_abort = eecp->eec_exception(arg,
1135 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1136 MCDI_EV_FIELD(eqp, DATA));
1138 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1140 should_abort = B_FALSE;
1144 case MCDI_EVENT_CODE_SCHEDERR:
1145 /* Informational only */
1148 case MCDI_EVENT_CODE_REBOOT:
1149 efx_mcdi_ev_death(enp, EIO);
1152 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1153 #if EFSYS_OPT_MAC_STATS
1154 if (eecp->eec_mac_stats != NULL) {
1155 eecp->eec_mac_stats(arg,
1156 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1161 case MCDI_EVENT_CODE_FWALERT: {
1162 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1164 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1165 should_abort = eecp->eec_exception(arg,
1166 EFX_EXCEPTION_FWALERT_SRAM,
1167 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1169 should_abort = eecp->eec_exception(arg,
1170 EFX_EXCEPTION_UNKNOWN_FWALERT,
1171 MCDI_EV_FIELD(eqp, DATA));
1176 EFSYS_PROBE1(mc_pcol_error, int, code);
1181 return (should_abort);
1184 #endif /* EFSYS_OPT_MCDI */
/*
 * siena_ev_qprime: write the masked read pointer into the queue's
 * FR_AZ_EVQ_RPTR_REG table entry to re-arm event delivery.
 * siena_ev_qpost: build a DRV_GEN_EV qword carrying `data` and write it
 * through FR_AZ_DRV_EV_REG targeted at this queue.
 * NOTE(review): function-name lines, local declarations, trailing
 * arguments of the RPTR write and returns are elided in this view.
 */
1186 static __checkReturn efx_rc_t
1188 __in efx_evq_t *eep,
1189 __in unsigned int count)
1191 efx_nic_t *enp = eep->ee_enp;
1195 rptr = count & eep->ee_mask;
1197 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1199 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
1207 __in efx_evq_t *eep,
1210 efx_nic_t *enp = eep->ee_enp;
1214 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1215 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1217 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1218 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1219 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1221 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
/*
 * Siena moderation: range-check `us`, then program the per-queue timer
 * via FR_BZ_TIMER_COMMAND_REGP0 -- mode DIS with value 0 when us == 0,
 * otherwise INT_HLDOFF with (ticks - 1) from efx_ev_usecs_to_ticks.
 * Queue 0's register lives in a locked page, hence the `locked` flag on
 * the table write.
 * NOTE(review): function body braces, the us == 0 `if`, fail labels and
 * returns are elided in this view.
 */
1224 static __checkReturn efx_rc_t
1226 __in efx_evq_t *eep,
1227 __in unsigned int us)
1229 efx_nic_t *enp = eep->ee_enp;
1230 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1231 unsigned int locked;
1235 if (us > encp->enc_evq_timer_max_us) {
1240 /* If the value is zero then disable the timer */
1242 EFX_POPULATE_DWORD_2(dword,
1243 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1244 FRF_CZ_TC_TIMER_VAL, 0);
1248 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
1251 EFSYS_ASSERT(ticks > 0);
1252 EFX_POPULATE_DWORD_2(dword,
1253 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1254 FRF_CZ_TC_TIMER_VAL, ticks - 1);
1257 locked = (eep->ee_index == 0) ? 1 : 0;
1259 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1260 eep->ee_index, &dword, locked);
1267 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Siena hardware EVQ creation: rejects extended-width queues (asserted),
 * limits the index under line interrupts with RSS (EFX_MAXRSS_LEGACY),
 * derives the EVQ size encoding and checks the buffer-table range,
 * installs the siena_ev_* handler table on the efx_evq_t, programs
 * FR_AZ_TIMER_TBL (notify mode, timer disabled) and FR_AZ_EVQ_PTR_TBL
 * (enable, size, buffer base), then applies the initial moderation `us`.
 * NOTE(review): the function name, several locals, the size-search loop
 * header/body framing, fail labels and returns are elided in this view.
 */
1272 static __checkReturn efx_rc_t
1274 __in efx_nic_t *enp,
1275 __in unsigned int index,
1276 __in efsys_mem_t *esmp,
1280 __in uint32_t flags,
1281 __in efx_evq_t *eep)
1283 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1287 boolean_t notify_mode;
1289 _NOTE(ARGUNUSED(esmp))
1291 EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);
1293 #if EFSYS_OPT_RX_SCALE
1294 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1295 index >= EFX_MAXRSS_LEGACY) {
1301 (1U << size) <= encp->enc_evq_max_nevs / encp->enc_evq_min_nevs;
1303 if ((1U << size) == (uint32_t)ndescs / encp->enc_evq_min_nevs)
1305 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1310 /* Set up the handler table */
1311 eep->ee_rx = siena_ev_rx;
1312 eep->ee_tx = siena_ev_tx;
1313 eep->ee_driver = siena_ev_driver;
1314 eep->ee_global = siena_ev_global;
1315 eep->ee_drv_gen = siena_ev_drv_gen;
1317 eep->ee_mcdi = siena_ev_mcdi;
1318 #endif /* EFSYS_OPT_MCDI */
1320 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
1321 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
1323 /* Set up the new event queue */
1324 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
1325 FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
1326 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1327 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1329 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1330 FRF_AZ_EVQ_BUF_BASE_ID, id);
1332 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1334 /* Set initial interrupt moderation */
1335 siena_ev_qmoderate(eep, us);
1341 #if EFSYS_OPT_RX_SCALE
1344 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1349 #endif /* EFSYS_OPT_SIENA */
1351 #if EFSYS_OPT_QSTATS
/*
 * MKCONFIG-generated table of EV qstat names, indexed by stat id, plus
 * the efx_ev_qstat_name accessor (bounds-asserted lookup). Do not edit
 * the generated block by hand -- regenerate with mkconfig.
 * NOTE(review): most table entries and the accessor's name line are
 * elided in this view.
 */
1353 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock ac223f7134058b4f */
1354 static const char * const __efx_ev_qstat_name[] = {
1361 "rx_buf_owner_id_err",
1362 "rx_ipv4_hdr_chksum_err",
1363 "rx_tcp_udp_chksum_err",
1367 "rx_mcast_hash_match",
1384 "driver_srm_upd_done",
1385 "driver_tx_descq_fls_done",
1386 "driver_rx_descq_fls_done",
1387 "driver_rx_descq_fls_failed",
1388 "driver_rx_dsc_error",
1389 "driver_tx_dsc_error",
1392 "rx_parse_incomplete",
1394 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
1398 __in efx_nic_t *enp,
1399 __in unsigned int id)
1401 _NOTE(ARGUNUSED(enp))
1403 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1404 EFSYS_ASSERT3U(id, <, EV_NQSTATS)
1406 return (__efx_ev_qstat_name[id]);
1408 #endif /* EFSYS_OPT_NAMES */
1409 #endif /* EFSYS_OPT_QSTATS */
1413 #if EFSYS_OPT_QSTATS
/*
 * Siena qstats update: fold each per-queue counter in eep->ee_stat[]
 * into the caller's array and zero the source (read-and-clear).
 */
1415 siena_ev_qstats_update(
1416 __in efx_evq_t *eep,
1417 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1421 for (id = 0; id < EV_NQSTATS; id++) {
1422 efsys_stat_t *essp = &stat[id];
1424 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1425 eep->ee_stat[id] = 0;
1428 #endif /* EFSYS_OPT_QSTATS */
/*
 * siena_ev_qdestroy: purge the hardware queue by zeroing this queue's
 * FR_AZ_EVQ_PTR_TBL and FR_AZ_TIMER_TBL entries.
 * siena_ev_fini: no hardware teardown required (enp unused).
 * NOTE(review): both function-name lines are elided in this view.
 */
1432 __in efx_evq_t *eep)
1434 efx_nic_t *enp = eep->ee_enp;
1437 /* Purge event queue */
1438 EFX_ZERO_OWORD(oword);
1440 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1441 eep->ee_index, &oword, B_TRUE);
1443 EFX_ZERO_OWORD(oword);
1444 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
1449 __in efx_nic_t *enp)
1451 _NOTE(ARGUNUSED(enp))
1454 #endif /* EFSYS_OPT_SIENA */
1456 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA
/*
 * Shared Siena/EF10 poll loop. Static asserts prove the two families'
 * event-code field layouts match, so one decoder serves both. Events are
 * read from the ring in batches of up to EFX_EV_BATCH (aligned so a batch
 * never crosses the batch-period boundary), optionally prefetching ahead
 * (EFSYS_OPT_EV_PREFETCH), then dispatched via the eep->ee_* handler
 * table keyed on FSF_AZ_EV_CODE. An unknown code is raised as an
 * EFX_EXCEPTION_EV_ERROR and aborts the loop by "poisoning" `batch` so
 * the do/while terminates. Processed slots are overwritten with all-ones
 * (EFX_SET_QWORD) so EFX_EV_PRESENT later reads them as empty.
 * NOTE(review): the do-loop header, per-case `break`s, the abort
 * bookkeeping and the final *countp write-back are elided in this view.
 */
1458 #define EFX_EV_BATCH 8
1461 siena_ef10_ev_qpoll(
1462 __in efx_evq_t *eep,
1463 __inout unsigned int *countp,
1464 __in const efx_ev_callbacks_t *eecp,
1467 efx_qword_t ev[EFX_EV_BATCH];
1474 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
1475 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
1476 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
1478 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
1479 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
1480 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
1481 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
1482 FSE_AZ_EV_CODE_DRV_GEN_EV);
1484 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
1485 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
1488 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
1489 EFSYS_ASSERT(countp != NULL);
1490 EFSYS_ASSERT(eecp != NULL);
1494 /* Read up until the end of the batch period */
1495 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
1496 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1497 for (total = 0; total < batch; ++total) {
1498 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
1500 if (!EFX_EV_PRESENT(ev[total]))
1503 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
1504 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
1505 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
1507 offset += sizeof (efx_qword_t);
1510 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
1512 * Prefetch the next batch when we get within PREFETCH_PERIOD
1513 * of a completed batch. If the batch is smaller, then prefetch
1516 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
1517 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1518 #endif /* EFSYS_OPT_EV_PREFETCH */
1520 /* Process the batch of events */
1521 for (index = 0; index < total; ++index) {
1522 boolean_t should_abort;
1525 #if EFSYS_OPT_EV_PREFETCH
1526 /* Prefetch if we've now reached the batch period */
1527 if (total == batch &&
1528 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
1529 offset = (count + batch) & eep->ee_mask;
1530 offset *= sizeof (efx_qword_t);
1532 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1534 #endif /* EFSYS_OPT_EV_PREFETCH */
1536 EFX_EV_QSTAT_INCR(eep, EV_ALL);
1538 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
1540 case FSE_AZ_EV_CODE_RX_EV:
1541 should_abort = eep->ee_rx(eep,
1542 &(ev[index]), eecp, arg);
1544 case FSE_AZ_EV_CODE_TX_EV:
1545 should_abort = eep->ee_tx(eep,
1546 &(ev[index]), eecp, arg);
1548 case FSE_AZ_EV_CODE_DRIVER_EV:
1549 should_abort = eep->ee_driver(eep,
1550 &(ev[index]), eecp, arg);
1552 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1553 should_abort = eep->ee_drv_gen(eep,
1554 &(ev[index]), eecp, arg);
1557 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
1558 should_abort = eep->ee_mcdi(eep,
1559 &(ev[index]), eecp, arg);
1562 case FSE_AZ_EV_CODE_GLOBAL_EV:
1563 if (eep->ee_global) {
1564 should_abort = eep->ee_global(eep,
1565 &(ev[index]), eecp, arg);
1568 /* else fallthrough */
1570 EFSYS_PROBE3(bad_event,
1571 unsigned int, eep->ee_index,
1573 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
1575 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
1577 EFSYS_ASSERT(eecp->eec_exception != NULL);
1578 (void) eecp->eec_exception(arg,
1579 EFX_EXCEPTION_EV_ERROR, code);
1580 should_abort = B_TRUE;
1583 /* Ignore subsequent events */
1587 * Poison batch to ensure the outer
1588 * loop is broken out of.
1590 EFSYS_ASSERT(batch <= EFX_EV_BATCH);
1591 batch += (EFX_EV_BATCH << 1);
1592 EFSYS_ASSERT(total != batch);
1598 * Now that the hardware has most likely moved onto dma'ing
1599 * into the next cache line, clear the processed events. Take
1600 * care to only clear out events that we've processed
1602 EFX_SET_QWORD(ev[0]);
1603 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1604 for (index = 0; index < total; ++index) {
1605 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
1606 offset += sizeof (efx_qword_t);
1611 } while (total == batch);
1616 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */