1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2007-2019 Solarflare Communications Inc.
/*
 * True when the event queue slot holds a real event: hardware-written
 * events never have both 32-bit halves equal to all-ones, which is the
 * fill pattern the poll loop writes back over processed entries
 * (see EFX_SET_QWORD use in siena_ef10_ev_qpoll).
 */
13 #define EFX_EV_PRESENT(_qword) \
14 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
15 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
21 static __checkReturn efx_rc_t
29 static __checkReturn efx_rc_t
32 __in unsigned int index,
33 __in efsys_mem_t *esmp,
45 static __checkReturn efx_rc_t
48 __in unsigned int count);
55 static __checkReturn efx_rc_t
58 __in unsigned int us);
62 siena_ev_qstats_update(
64 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
68 #endif /* EFSYS_OPT_SIENA */
70 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA
75 __inout unsigned int *countp,
76 __in const efx_ev_callbacks_t *eecp,
79 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */
82 static const efx_ev_ops_t __efx_ev_siena_ops = {
83 siena_ev_init, /* eevo_init */
84 siena_ev_fini, /* eevo_fini */
85 siena_ev_qcreate, /* eevo_qcreate */
86 siena_ev_qdestroy, /* eevo_qdestroy */
87 siena_ev_qprime, /* eevo_qprime */
88 siena_ev_qpost, /* eevo_qpost */
89 siena_ef10_ev_qpoll, /* eevo_qpoll */
90 siena_ev_qmoderate, /* eevo_qmoderate */
92 siena_ev_qstats_update, /* eevo_qstats_update */
95 #endif /* EFSYS_OPT_SIENA */
98 static const efx_ev_ops_t __efx_ev_ef10_ops = {
99 ef10_ev_init, /* eevo_init */
100 ef10_ev_fini, /* eevo_fini */
101 ef10_ev_qcreate, /* eevo_qcreate */
102 ef10_ev_qdestroy, /* eevo_qdestroy */
103 ef10_ev_qprime, /* eevo_qprime */
104 ef10_ev_qpost, /* eevo_qpost */
105 siena_ef10_ev_qpoll, /* eevo_qpoll */
106 ef10_ev_qmoderate, /* eevo_qmoderate */
108 ef10_ev_qstats_update, /* eevo_qstats_update */
111 #endif /* EFX_OPTS_EF10() */
113 #if EFSYS_OPT_RIVERHEAD
114 static const efx_ev_ops_t __efx_ev_rhead_ops = {
115 rhead_ev_init, /* eevo_init */
116 rhead_ev_fini, /* eevo_fini */
117 rhead_ev_qcreate, /* eevo_qcreate */
118 rhead_ev_qdestroy, /* eevo_qdestroy */
119 rhead_ev_qprime, /* eevo_qprime */
120 rhead_ev_qpost, /* eevo_qpost */
121 rhead_ev_qpoll, /* eevo_qpoll */
122 rhead_ev_qmoderate, /* eevo_qmoderate */
124 rhead_ev_qstats_update, /* eevo_qstats_update */
127 #endif /* EFSYS_OPT_RIVERHEAD */
130 __checkReturn efx_rc_t
134 const efx_ev_ops_t *eevop;
137 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
138 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
140 if (enp->en_mod_flags & EFX_MOD_EV) {
145 switch (enp->en_family) {
147 case EFX_FAMILY_SIENA:
148 eevop = &__efx_ev_siena_ops;
150 #endif /* EFSYS_OPT_SIENA */
152 #if EFSYS_OPT_HUNTINGTON
153 case EFX_FAMILY_HUNTINGTON:
154 eevop = &__efx_ev_ef10_ops;
156 #endif /* EFSYS_OPT_HUNTINGTON */
158 #if EFSYS_OPT_MEDFORD
159 case EFX_FAMILY_MEDFORD:
160 eevop = &__efx_ev_ef10_ops;
162 #endif /* EFSYS_OPT_MEDFORD */
164 #if EFSYS_OPT_MEDFORD2
165 case EFX_FAMILY_MEDFORD2:
166 eevop = &__efx_ev_ef10_ops;
168 #endif /* EFSYS_OPT_MEDFORD2 */
170 #if EFSYS_OPT_RIVERHEAD
171 case EFX_FAMILY_RIVERHEAD:
172 eevop = &__efx_ev_rhead_ops;
174 #endif /* EFSYS_OPT_RIVERHEAD */
182 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
184 if ((rc = eevop->eevo_init(enp)) != 0)
187 enp->en_eevop = eevop;
188 enp->en_mod_flags |= EFX_MOD_EV;
195 EFSYS_PROBE1(fail1, efx_rc_t, rc);
197 enp->en_eevop = NULL;
198 enp->en_mod_flags &= ~EFX_MOD_EV;
204 __in const efx_nic_t *enp,
205 __in unsigned int ndescs,
208 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
211 desc_size = encp->enc_ev_desc_size;
213 #if EFSYS_OPT_EV_EXTENDED_WIDTH
214 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
215 desc_size = encp->enc_ev_ew_desc_size;
217 EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);
220 return (ndescs * desc_size);
223 __checkReturn unsigned int
225 __in const efx_nic_t *enp,
226 __in unsigned int ndescs,
231 size = efx_evq_size(enp, ndescs, flags);
233 return (EFX_DIV_ROUND_UP(size, EFX_BUF_SIZE));
240 const efx_ev_ops_t *eevop = enp->en_eevop;
242 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
243 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
244 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
245 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
246 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
247 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
249 eevop->eevo_fini(enp);
251 enp->en_eevop = NULL;
252 enp->en_mod_flags &= ~EFX_MOD_EV;
256 __checkReturn efx_rc_t
259 __in unsigned int index,
260 __in efsys_mem_t *esmp,
266 __deref_out efx_evq_t **eepp)
268 const efx_ev_ops_t *eevop = enp->en_eevop;
270 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
273 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
274 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
276 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
277 enp->en_nic_cfg.enc_evq_limit);
279 if (index >= encp->enc_evq_limit) {
284 if (us > encp->enc_evq_timer_max_us) {
289 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
290 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
292 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
303 if ((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) &&
304 (encp->enc_ev_ew_desc_size == 0)) {
305 /* Extended width event descriptors are not supported. */
310 EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs));
311 EFSYS_ASSERT(ISP2(encp->enc_evq_min_nevs));
314 ndescs < encp->enc_evq_min_nevs ||
315 ndescs > encp->enc_evq_max_nevs) {
320 if (EFSYS_MEM_SIZE(esmp) < (ndescs * encp->enc_ev_desc_size)) {
321 /* Buffer too small for event queue descriptors. */
326 /* Allocate an EVQ object */
327 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
333 eep->ee_magic = EFX_EVQ_MAGIC;
335 eep->ee_index = index;
336 eep->ee_mask = ndescs - 1;
337 eep->ee_flags = flags;
341 * Set outputs before the queue is created because interrupts may be
342 * raised for events immediately after the queue is created, before the
343 * function call below returns. See bug58606.
345 * The eepp pointer passed in by the client must therefore point to data
346 * shared with the client's event processing context.
351 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
362 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
378 EFSYS_PROBE1(fail1, efx_rc_t, rc);
382 __checkReturn efx_rc_t
385 __in unsigned int index,
386 __in efsys_mem_t *esmp,
391 __deref_out efx_evq_t **eepp)
393 uint32_t irq = index;
395 return (efx_ev_qcreate_irq(enp, index, esmp, ndescs, id, us, flags,
403 efx_nic_t *enp = eep->ee_enp;
404 const efx_ev_ops_t *eevop = enp->en_eevop;
406 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
408 EFSYS_ASSERT(enp->en_ev_qcount != 0);
411 eevop->eevo_qdestroy(eep);
413 /* Free the EVQ object */
414 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
417 __checkReturn efx_rc_t
420 __in unsigned int count)
422 efx_nic_t *enp = eep->ee_enp;
423 const efx_ev_ops_t *eevop = enp->en_eevop;
426 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
428 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
433 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
441 EFSYS_PROBE1(fail1, efx_rc_t, rc);
445 __checkReturn boolean_t
448 __in unsigned int count)
453 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
455 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
456 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
458 return (EFX_EV_PRESENT(qword));
461 #if EFSYS_OPT_EV_PREFETCH
466 __in unsigned int count)
470 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
472 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
473 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
476 #endif /* EFSYS_OPT_EV_PREFETCH */
479 * This method is needed to ensure that eec_initialized callback
480 * is invoked after queue creation. The callback will be invoked
481 * on Riverhead boards which have no support for INIT_DONE events
482 * and will do nothing on other boards.
484 * The client drivers must call this method after calling efx_ev_qcreate().
485 * The call must be done with the same locks being held (if any) which are
486 * normally acquired around efx_ev_qpoll() calls to ensure that
487 * eec_initialized callback is invoked within the same locking context.
490 efx_ev_qcreate_check_init_done(
492 __in const efx_ev_callbacks_t *eecp,
495 const efx_nic_cfg_t *encp;
497 EFSYS_ASSERT(eep != NULL);
498 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
499 EFSYS_ASSERT(eecp != NULL);
500 EFSYS_ASSERT(eecp->eec_initialized != NULL);
502 encp = efx_nic_cfg_get(eep->ee_enp);
504 if (encp->enc_evq_init_done_ev_supported == B_FALSE)
505 (void) eecp->eec_initialized(arg);
511 __inout unsigned int *countp,
512 __in const efx_ev_callbacks_t *eecp,
515 efx_nic_t *enp = eep->ee_enp;
516 const efx_ev_ops_t *eevop = enp->en_eevop;
518 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
520 EFSYS_ASSERT(eevop != NULL &&
521 eevop->eevo_qpoll != NULL);
523 eevop->eevo_qpoll(eep, countp, eecp, arg);
531 efx_nic_t *enp = eep->ee_enp;
532 const efx_ev_ops_t *eevop = enp->en_eevop;
534 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
536 EFSYS_ASSERT(eevop != NULL &&
537 eevop->eevo_qpost != NULL);
539 eevop->eevo_qpost(eep, data);
542 __checkReturn efx_rc_t
543 efx_ev_usecs_to_ticks(
545 __in unsigned int us,
546 __out unsigned int *ticksp)
548 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
552 if (encp->enc_evq_timer_quantum_ns == 0) {
557 /* Convert microseconds to a timer tick count */
560 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
561 ticks = 1; /* Never round down to zero */
563 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
569 EFSYS_PROBE1(fail1, efx_rc_t, rc);
573 __checkReturn efx_rc_t
576 __in unsigned int us)
578 efx_nic_t *enp = eep->ee_enp;
579 const efx_ev_ops_t *eevop = enp->en_eevop;
582 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
584 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
585 EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
590 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
598 EFSYS_PROBE1(fail1, efx_rc_t, rc);
604 efx_ev_qstats_update(
606 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
608 { efx_nic_t *enp = eep->ee_enp;
609 const efx_ev_ops_t *eevop = enp->en_eevop;
611 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
613 eevop->eevo_qstats_update(eep, stat);
616 #endif /* EFSYS_OPT_QSTATS */
620 static __checkReturn efx_rc_t
627 * Program the event queue for receive and transmit queue
630 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
631 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
632 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
638 static __checkReturn boolean_t
641 __in efx_qword_t *eqp,
644 __inout uint16_t *flagsp)
646 boolean_t ignore = B_FALSE;
648 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
649 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
650 EFSYS_PROBE(tobe_disc);
652 * Assume this is a unicast address mismatch, unless below
653 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
654 * EV_RX_PAUSE_FRM_ERR is set.
656 (*flagsp) |= EFX_ADDR_MISMATCH;
659 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
660 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
661 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
662 (*flagsp) |= EFX_DISCARD;
664 #if EFSYS_OPT_RX_SCATTER
666 * Lookout for payload queue ran dry errors and ignore them.
668 * Sadly for the header/data split cases, the descriptor
669 * pointer in this event refers to the header queue and
670 * therefore cannot be easily detected as duplicate.
671 * So we drop these and rely on the receive processing seeing
672 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
673 * the partially received packet.
675 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
676 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
677 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
679 #endif /* EFSYS_OPT_RX_SCATTER */
682 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
683 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
684 EFSYS_PROBE(crc_err);
685 (*flagsp) &= ~EFX_ADDR_MISMATCH;
686 (*flagsp) |= EFX_DISCARD;
689 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
690 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
691 EFSYS_PROBE(pause_frm_err);
692 (*flagsp) &= ~EFX_ADDR_MISMATCH;
693 (*flagsp) |= EFX_DISCARD;
696 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
697 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
698 EFSYS_PROBE(owner_id_err);
699 (*flagsp) |= EFX_DISCARD;
702 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
703 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
704 EFSYS_PROBE(ipv4_err);
705 (*flagsp) &= ~EFX_CKSUM_IPV4;
708 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
709 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
710 EFSYS_PROBE(udp_chk_err);
711 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
714 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
715 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
718 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
719 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
722 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
728 static __checkReturn boolean_t
731 __in efx_qword_t *eqp,
732 __in const efx_ev_callbacks_t *eecp,
739 #if EFSYS_OPT_RX_SCATTER
741 boolean_t jumbo_cont;
742 #endif /* EFSYS_OPT_RX_SCATTER */
747 boolean_t should_abort;
749 EFX_EV_QSTAT_INCR(eep, EV_RX);
751 /* Basic packet information */
752 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
753 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
754 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
755 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
757 #if EFSYS_OPT_RX_SCATTER
758 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
759 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
760 #endif /* EFSYS_OPT_RX_SCATTER */
762 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
764 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
767 * If packet is marked as OK and packet type is TCP/IP or
768 * UDP/IP or other IP, then we can rely on the hardware checksums.
771 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
772 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
774 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
775 flags |= EFX_PKT_IPV6;
777 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
778 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
782 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
783 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
785 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
786 flags |= EFX_PKT_IPV6;
788 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
789 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
793 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
795 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
796 flags = EFX_PKT_IPV6;
798 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
799 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
803 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
804 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
809 EFSYS_ASSERT(B_FALSE);
814 #if EFSYS_OPT_RX_SCATTER
815 /* Report scatter and header/lookahead split buffer flags */
817 flags |= EFX_PKT_START;
819 flags |= EFX_PKT_CONT;
820 #endif /* EFSYS_OPT_RX_SCATTER */
822 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
824 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
826 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
827 uint32_t, size, uint16_t, flags);
833 /* If we're not discarding the packet then it is ok */
834 if (~flags & EFX_DISCARD)
835 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
837 /* Detect multicast packets that didn't match the filter */
838 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
839 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
841 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
842 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
844 EFSYS_PROBE(mcast_mismatch);
845 flags |= EFX_ADDR_MISMATCH;
848 flags |= EFX_PKT_UNICAST;
852 * The packet parser in Siena can abort parsing packets under
853 * certain error conditions, setting the PKT_NOT_PARSED bit
854 * (which clears PKT_OK). If this is set, then don't trust
855 * the PKT_TYPE field.
860 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
862 flags |= EFX_CHECK_VLAN;
865 if (~flags & EFX_CHECK_VLAN) {
868 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
869 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
870 flags |= EFX_PKT_VLAN_TAGGED;
873 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
874 uint32_t, size, uint16_t, flags);
876 EFSYS_ASSERT(eecp->eec_rx != NULL);
877 should_abort = eecp->eec_rx(arg, label, id, size, flags);
879 return (should_abort);
882 static __checkReturn boolean_t
885 __in efx_qword_t *eqp,
886 __in const efx_ev_callbacks_t *eecp,
891 boolean_t should_abort;
893 EFX_EV_QSTAT_INCR(eep, EV_TX);
895 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
896 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
897 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
898 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
900 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
901 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
903 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
905 EFSYS_ASSERT(eecp->eec_tx != NULL);
906 should_abort = eecp->eec_tx(arg, label, id);
908 return (should_abort);
911 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
912 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
913 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
914 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
916 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
917 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
919 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
920 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
922 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
923 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
925 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
929 static __checkReturn boolean_t
932 __in efx_qword_t *eqp,
933 __in const efx_ev_callbacks_t *eecp,
936 _NOTE(ARGUNUSED(eqp, eecp, arg))
938 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
943 static __checkReturn boolean_t
946 __in efx_qword_t *eqp,
947 __in const efx_ev_callbacks_t *eecp,
950 boolean_t should_abort;
952 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
953 should_abort = B_FALSE;
955 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
956 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
959 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
961 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
963 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
965 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
966 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
970 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
974 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
975 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
977 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
978 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
981 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
983 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
985 should_abort = eecp->eec_rxq_flush_failed(arg,
988 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
990 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
992 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
997 case FSE_AZ_EVQ_INIT_DONE_EV:
998 EFSYS_ASSERT(eecp->eec_initialized != NULL);
999 should_abort = eecp->eec_initialized(arg);
1003 case FSE_AZ_EVQ_NOT_EN_EV:
1004 EFSYS_PROBE(evq_not_en);
1007 case FSE_AZ_SRM_UPD_DONE_EV: {
1010 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
1012 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1014 EFSYS_ASSERT(eecp->eec_sram != NULL);
1015 should_abort = eecp->eec_sram(arg, code);
1019 case FSE_AZ_WAKE_UP_EV: {
1022 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1024 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
1025 should_abort = eecp->eec_wake_up(arg, id);
1029 case FSE_AZ_TX_PKT_NON_TCP_UDP:
1030 EFSYS_PROBE(tx_pkt_non_tcp_udp);
1033 case FSE_AZ_TIMER_EV: {
1036 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1038 EFSYS_ASSERT(eecp->eec_timer != NULL);
1039 should_abort = eecp->eec_timer(arg, id);
1043 case FSE_AZ_RX_DSC_ERROR_EV:
1044 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
1046 EFSYS_PROBE(rx_dsc_error);
1048 EFSYS_ASSERT(eecp->eec_exception != NULL);
1049 should_abort = eecp->eec_exception(arg,
1050 EFX_EXCEPTION_RX_DSC_ERROR, 0);
1054 case FSE_AZ_TX_DSC_ERROR_EV:
1055 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
1057 EFSYS_PROBE(tx_dsc_error);
1059 EFSYS_ASSERT(eecp->eec_exception != NULL);
1060 should_abort = eecp->eec_exception(arg,
1061 EFX_EXCEPTION_TX_DSC_ERROR, 0);
1069 return (should_abort);
1072 static __checkReturn boolean_t
1074 __in efx_evq_t *eep,
1075 __in efx_qword_t *eqp,
1076 __in const efx_ev_callbacks_t *eecp,
1080 boolean_t should_abort;
1082 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1084 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
1085 if (data >= ((uint32_t)1 << 16)) {
1086 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1087 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1088 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1092 EFSYS_ASSERT(eecp->eec_software != NULL);
1093 should_abort = eecp->eec_software(arg, (uint16_t)data);
1095 return (should_abort);
1100 static __checkReturn boolean_t
1102 __in efx_evq_t *eep,
1103 __in efx_qword_t *eqp,
1104 __in const efx_ev_callbacks_t *eecp,
1107 efx_nic_t *enp = eep->ee_enp;
1109 boolean_t should_abort = B_FALSE;
1111 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
1113 if (enp->en_family != EFX_FAMILY_SIENA)
1116 EFSYS_ASSERT(eecp->eec_link_change != NULL);
1117 EFSYS_ASSERT(eecp->eec_exception != NULL);
1118 #if EFSYS_OPT_MON_STATS
1119 EFSYS_ASSERT(eecp->eec_monitor != NULL);
1122 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
1124 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
1126 case MCDI_EVENT_CODE_BADSSERT:
1127 efx_mcdi_ev_death(enp, EINTR);
1130 case MCDI_EVENT_CODE_CMDDONE:
1131 efx_mcdi_ev_cpl(enp,
1132 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1133 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1134 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1137 case MCDI_EVENT_CODE_LINKCHANGE: {
1138 efx_link_mode_t link_mode;
1140 siena_phy_link_ev(enp, eqp, &link_mode);
1141 should_abort = eecp->eec_link_change(arg, link_mode);
1144 case MCDI_EVENT_CODE_SENSOREVT: {
1145 #if EFSYS_OPT_MON_STATS
1147 efx_mon_stat_value_t value;
1150 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
1151 should_abort = eecp->eec_monitor(arg, id, value);
1152 else if (rc == ENOTSUP) {
1153 should_abort = eecp->eec_exception(arg,
1154 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1155 MCDI_EV_FIELD(eqp, DATA));
1157 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1159 should_abort = B_FALSE;
1163 case MCDI_EVENT_CODE_SCHEDERR:
1164 /* Informational only */
1167 case MCDI_EVENT_CODE_REBOOT:
1168 efx_mcdi_ev_death(enp, EIO);
1171 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1172 #if EFSYS_OPT_MAC_STATS
1173 if (eecp->eec_mac_stats != NULL) {
1174 eecp->eec_mac_stats(arg,
1175 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1180 case MCDI_EVENT_CODE_FWALERT: {
1181 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1183 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1184 should_abort = eecp->eec_exception(arg,
1185 EFX_EXCEPTION_FWALERT_SRAM,
1186 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1188 should_abort = eecp->eec_exception(arg,
1189 EFX_EXCEPTION_UNKNOWN_FWALERT,
1190 MCDI_EV_FIELD(eqp, DATA));
1195 EFSYS_PROBE1(mc_pcol_error, int, code);
1200 return (should_abort);
1203 #endif /* EFSYS_OPT_MCDI */
1205 static __checkReturn efx_rc_t
1207 __in efx_evq_t *eep,
1208 __in unsigned int count)
1210 efx_nic_t *enp = eep->ee_enp;
1214 rptr = count & eep->ee_mask;
1216 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1218 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
1226 __in efx_evq_t *eep,
1229 efx_nic_t *enp = eep->ee_enp;
1233 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1234 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1236 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1237 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1238 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1240 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
1243 static __checkReturn efx_rc_t
1245 __in efx_evq_t *eep,
1246 __in unsigned int us)
1248 efx_nic_t *enp = eep->ee_enp;
1249 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1250 unsigned int locked;
1254 if (us > encp->enc_evq_timer_max_us) {
1259 /* If the value is zero then disable the timer */
1261 EFX_POPULATE_DWORD_2(dword,
1262 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1263 FRF_CZ_TC_TIMER_VAL, 0);
1267 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
1270 EFSYS_ASSERT(ticks > 0);
1271 EFX_POPULATE_DWORD_2(dword,
1272 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1273 FRF_CZ_TC_TIMER_VAL, ticks - 1);
1276 locked = (eep->ee_index == 0) ? 1 : 0;
1278 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1279 eep->ee_index, &dword, locked);
1286 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1291 static __checkReturn efx_rc_t
1293 __in efx_nic_t *enp,
1294 __in unsigned int index,
1295 __in efsys_mem_t *esmp,
1299 __in uint32_t flags,
1301 __in efx_evq_t *eep)
1303 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1307 boolean_t notify_mode;
1309 _NOTE(ARGUNUSED(esmp))
1311 EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);
1318 #if EFSYS_OPT_RX_SCALE
1319 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1320 index >= EFX_MAXRSS_LEGACY) {
1326 (1U << size) <= encp->enc_evq_max_nevs / encp->enc_evq_min_nevs;
1328 if ((1U << size) == (uint32_t)ndescs / encp->enc_evq_min_nevs)
1330 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1335 /* Set up the handler table */
1336 eep->ee_rx = siena_ev_rx;
1337 eep->ee_tx = siena_ev_tx;
1338 eep->ee_driver = siena_ev_driver;
1339 eep->ee_global = siena_ev_global;
1340 eep->ee_drv_gen = siena_ev_drv_gen;
1342 eep->ee_mcdi = siena_ev_mcdi;
1343 #endif /* EFSYS_OPT_MCDI */
1345 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
1346 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
1348 /* Set up the new event queue */
1349 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
1350 FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
1351 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1352 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1354 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1355 FRF_AZ_EVQ_BUF_BASE_ID, id);
1357 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1359 /* Set initial interrupt moderation */
1360 siena_ev_qmoderate(eep, us);
1366 #if EFSYS_OPT_RX_SCALE
1371 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1376 #endif /* EFSYS_OPT_SIENA */
1378 #if EFSYS_OPT_QSTATS
1380 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock ac223f7134058b4f */
1381 static const char * const __efx_ev_qstat_name[] = {
1388 "rx_buf_owner_id_err",
1389 "rx_ipv4_hdr_chksum_err",
1390 "rx_tcp_udp_chksum_err",
1394 "rx_mcast_hash_match",
1411 "driver_srm_upd_done",
1412 "driver_tx_descq_fls_done",
1413 "driver_rx_descq_fls_done",
1414 "driver_rx_descq_fls_failed",
1415 "driver_rx_dsc_error",
1416 "driver_tx_dsc_error",
1419 "rx_parse_incomplete",
1421 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
1425 __in efx_nic_t *enp,
1426 __in unsigned int id)
1428 _NOTE(ARGUNUSED(enp))
1430 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1431 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1433 return (__efx_ev_qstat_name[id]);
1435 #endif /* EFSYS_OPT_NAMES */
1436 #endif /* EFSYS_OPT_QSTATS */
1440 #if EFSYS_OPT_QSTATS
1442 siena_ev_qstats_update(
1443 __in efx_evq_t *eep,
1444 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1448 for (id = 0; id < EV_NQSTATS; id++) {
1449 efsys_stat_t *essp = &stat[id];
1451 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1452 eep->ee_stat[id] = 0;
1455 #endif /* EFSYS_OPT_QSTATS */
1459 __in efx_evq_t *eep)
1461 efx_nic_t *enp = eep->ee_enp;
1464 /* Purge event queue */
1465 EFX_ZERO_OWORD(oword);
1467 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1468 eep->ee_index, &oword, B_TRUE);
1470 EFX_ZERO_OWORD(oword);
1471 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
1476 __in efx_nic_t *enp)
1478 _NOTE(ARGUNUSED(enp))
1481 #endif /* EFSYS_OPT_SIENA */
1483 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA
/* Number of event descriptors read and processed per qpoll inner batch */
1485 #define EFX_EV_BATCH 8
1488 siena_ef10_ev_qpoll(
1489 __in efx_evq_t *eep,
1490 __inout unsigned int *countp,
1491 __in const efx_ev_callbacks_t *eecp,
1494 efx_qword_t ev[EFX_EV_BATCH];
1501 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
1502 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
1503 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
1505 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
1506 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
1507 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
1508 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
1509 FSE_AZ_EV_CODE_DRV_GEN_EV);
1511 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
1512 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
1515 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
1516 EFSYS_ASSERT(countp != NULL);
1517 EFSYS_ASSERT(eecp != NULL);
1521 /* Read up until the end of the batch period */
1522 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
1523 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1524 for (total = 0; total < batch; ++total) {
1525 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
1527 if (!EFX_EV_PRESENT(ev[total]))
1530 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
1531 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
1532 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
1534 offset += sizeof (efx_qword_t);
1537 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
1539 * Prefetch the next batch when we get within PREFETCH_PERIOD
1540 * of a completed batch. If the batch is smaller, then prefetch
1543 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
1544 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1545 #endif /* EFSYS_OPT_EV_PREFETCH */
1547 /* Process the batch of events */
1548 for (index = 0; index < total; ++index) {
1549 boolean_t should_abort;
1552 #if EFSYS_OPT_EV_PREFETCH
1553 /* Prefetch if we've now reached the batch period */
1554 if (total == batch &&
1555 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
1556 offset = (count + batch) & eep->ee_mask;
1557 offset *= sizeof (efx_qword_t);
1559 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1561 #endif /* EFSYS_OPT_EV_PREFETCH */
1563 EFX_EV_QSTAT_INCR(eep, EV_ALL);
1565 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
1567 case FSE_AZ_EV_CODE_RX_EV:
1568 should_abort = eep->ee_rx(eep,
1569 &(ev[index]), eecp, arg);
1571 case FSE_AZ_EV_CODE_TX_EV:
1572 should_abort = eep->ee_tx(eep,
1573 &(ev[index]), eecp, arg);
1575 case FSE_AZ_EV_CODE_DRIVER_EV:
1576 should_abort = eep->ee_driver(eep,
1577 &(ev[index]), eecp, arg);
1579 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1580 should_abort = eep->ee_drv_gen(eep,
1581 &(ev[index]), eecp, arg);
1584 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
1585 should_abort = eep->ee_mcdi(eep,
1586 &(ev[index]), eecp, arg);
1589 case FSE_AZ_EV_CODE_GLOBAL_EV:
1590 if (eep->ee_global) {
1591 should_abort = eep->ee_global(eep,
1592 &(ev[index]), eecp, arg);
1595 /* else fallthrough */
1597 EFSYS_PROBE3(bad_event,
1598 unsigned int, eep->ee_index,
1600 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
1602 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
1604 EFSYS_ASSERT(eecp->eec_exception != NULL);
1605 (void) eecp->eec_exception(arg,
1606 EFX_EXCEPTION_EV_ERROR, code);
1607 should_abort = B_TRUE;
1610 /* Ignore subsequent events */
1614 * Poison batch to ensure the outer
1615 * loop is broken out of.
1617 EFSYS_ASSERT(batch <= EFX_EV_BATCH);
1618 batch += (EFX_EV_BATCH << 1);
1619 EFSYS_ASSERT(total != batch);
1625 * Now that the hardware has most likely moved onto dma'ing
1626 * into the next cache line, clear the processed events. Take
1627 * care to only clear out events that we've processed
1629 EFX_SET_QWORD(ev[0]);
1630 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1631 for (index = 0; index < total; ++index) {
1632 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
1633 offset += sizeof (efx_qword_t);
1638 } while (total == batch);
1643 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */