1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2007-2019 Solarflare Communications Inc.
/*
 * True when an event is present at the given EVQ entry: queue memory is
 * initialised to all-ones, so an entry holds a real event once neither
 * 32-bit half of the qword reads 0xffffffff.
 */
13 #define EFX_EV_PRESENT(_qword) \
14 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
15 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
21 static __checkReturn efx_rc_t
29 static __checkReturn efx_rc_t
32 __in unsigned int index,
33 __in efsys_mem_t *esmp,
44 static __checkReturn efx_rc_t
47 __in unsigned int count);
54 static __checkReturn efx_rc_t
57 __in unsigned int us);
61 siena_ev_qstats_update(
63 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
67 #endif /* EFSYS_OPT_SIENA */
69 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA
74 __inout unsigned int *countp,
75 __in const efx_ev_callbacks_t *eecp,
78 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */
/*
 * Siena event-queue method table, dispatched via enp->en_eevop.
 * qpoll is shared with EF10 (siena_ef10_ev_qpoll) since the event
 * formats are compatible. NOTE(review): this listing is sampled;
 * some member lines and the closing brace are not visible here.
 */
81 static const efx_ev_ops_t __efx_ev_siena_ops = {
82 siena_ev_init, /* eevo_init */
83 siena_ev_fini, /* eevo_fini */
84 siena_ev_qcreate, /* eevo_qcreate */
85 siena_ev_qdestroy, /* eevo_qdestroy */
86 siena_ev_qprime, /* eevo_qprime */
87 siena_ev_qpost, /* eevo_qpost */
88 siena_ef10_ev_qpoll, /* eevo_qpoll */
89 siena_ev_qmoderate, /* eevo_qmoderate */
91 siena_ev_qstats_update, /* eevo_qstats_update */
94 #endif /* EFSYS_OPT_SIENA */
/*
 * EF10 (Huntington/Medford/Medford2) event-queue method table.
 * Note the shared siena_ef10_ev_qpoll for eevo_qpoll — the EV_CODE
 * field layouts match across Siena and EF10. NOTE(review): sampled
 * listing; closing brace not visible in this extract.
 */
97 static const efx_ev_ops_t __efx_ev_ef10_ops = {
98 ef10_ev_init, /* eevo_init */
99 ef10_ev_fini, /* eevo_fini */
100 ef10_ev_qcreate, /* eevo_qcreate */
101 ef10_ev_qdestroy, /* eevo_qdestroy */
102 ef10_ev_qprime, /* eevo_qprime */
103 ef10_ev_qpost, /* eevo_qpost */
104 siena_ef10_ev_qpoll, /* eevo_qpoll */
105 ef10_ev_qmoderate, /* eevo_qmoderate */
107 ef10_ev_qstats_update, /* eevo_qstats_update */
110 #endif /* EFX_OPTS_EF10() */
/*
 * Riverhead (EF100) event-queue method table; Riverhead has its own
 * qpoll implementation, unlike Siena/EF10 which share one.
 * NOTE(review): sampled listing; closing brace not visible here.
 */
112 #if EFSYS_OPT_RIVERHEAD
113 static const efx_ev_ops_t __efx_ev_rhead_ops = {
114 rhead_ev_init, /* eevo_init */
115 rhead_ev_fini, /* eevo_fini */
116 rhead_ev_qcreate, /* eevo_qcreate */
117 rhead_ev_qdestroy, /* eevo_qdestroy */
118 rhead_ev_qprime, /* eevo_qprime */
119 rhead_ev_qpost, /* eevo_qpost */
120 rhead_ev_qpoll, /* eevo_qpoll */
121 rhead_ev_qmoderate, /* eevo_qmoderate */
123 rhead_ev_qstats_update, /* eevo_qstats_update */
126 #endif /* EFSYS_OPT_RIVERHEAD */
129 __checkReturn efx_rc_t
133 const efx_ev_ops_t *eevop;
136 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
137 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
139 if (enp->en_mod_flags & EFX_MOD_EV) {
144 switch (enp->en_family) {
146 case EFX_FAMILY_SIENA:
147 eevop = &__efx_ev_siena_ops;
149 #endif /* EFSYS_OPT_SIENA */
151 #if EFSYS_OPT_HUNTINGTON
152 case EFX_FAMILY_HUNTINGTON:
153 eevop = &__efx_ev_ef10_ops;
155 #endif /* EFSYS_OPT_HUNTINGTON */
157 #if EFSYS_OPT_MEDFORD
158 case EFX_FAMILY_MEDFORD:
159 eevop = &__efx_ev_ef10_ops;
161 #endif /* EFSYS_OPT_MEDFORD */
163 #if EFSYS_OPT_MEDFORD2
164 case EFX_FAMILY_MEDFORD2:
165 eevop = &__efx_ev_ef10_ops;
167 #endif /* EFSYS_OPT_MEDFORD2 */
169 #if EFSYS_OPT_RIVERHEAD
170 case EFX_FAMILY_RIVERHEAD:
171 eevop = &__efx_ev_rhead_ops;
173 #endif /* EFSYS_OPT_RIVERHEAD */
181 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
183 if ((rc = eevop->eevo_init(enp)) != 0)
186 enp->en_eevop = eevop;
187 enp->en_mod_flags |= EFX_MOD_EV;
194 EFSYS_PROBE1(fail1, efx_rc_t, rc);
196 enp->en_eevop = NULL;
197 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * EVQ size helper: bytes required for an event queue of ndescs
 * descriptors (ndescs * per-descriptor size from the NIC config).
 * NOTE(review): the function-name lines are missing from this
 * sampled listing; presumably efx_evq_size() followed by
 * efx_evq_nbufs() — confirm against the full source.
 */
203 __in const efx_nic_t *enp,
204 __in unsigned int ndescs)
206 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
208 return (ndescs * encp->enc_ev_desc_size);
/* Number of EFX_BUF_SIZE buffers needed to back that queue (round up). */
211 __checkReturn unsigned int
213 __in const efx_nic_t *enp,
214 __in unsigned int ndescs)
216 return (EFX_DIV_ROUND_UP(efx_evq_size(enp, ndescs), EFX_BUF_SIZE));
/*
 * Tear down the event module: asserts all EVQs are destroyed and that
 * RX/TX modules are already down, calls the family-specific fini, then
 * clears the ops pointer and the EFX_MOD_EV flag. NOTE(review): the
 * function signature line is outside this sampled extract.
 */
223 const efx_ev_ops_t *eevop = enp->en_eevop;
225 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
226 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
227 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
228 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
229 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
230 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
232 eevop->eevo_fini(enp);
234 enp->en_eevop = NULL;
235 enp->en_mod_flags &= ~EFX_MOD_EV;
239 __checkReturn efx_rc_t
242 __in unsigned int index,
243 __in efsys_mem_t *esmp,
248 __deref_out efx_evq_t **eepp)
250 const efx_ev_ops_t *eevop = enp->en_eevop;
252 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
255 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
256 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
258 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
259 enp->en_nic_cfg.enc_evq_limit);
261 if (index >= encp->enc_evq_limit) {
266 if (us > encp->enc_evq_timer_max_us) {
271 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
272 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
274 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
285 EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs));
286 EFSYS_ASSERT(ISP2(encp->enc_evq_min_nevs));
289 ndescs < encp->enc_evq_min_nevs ||
290 ndescs > encp->enc_evq_max_nevs) {
295 /* Allocate an EVQ object */
296 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
302 eep->ee_magic = EFX_EVQ_MAGIC;
304 eep->ee_index = index;
305 eep->ee_mask = ndescs - 1;
306 eep->ee_flags = flags;
310 * Set outputs before the queue is created because interrupts may be
311 * raised for events immediately after the queue is created, before the
312 * function call below returns. See bug58606.
314 * The eepp pointer passed in by the client must therefore point to data
315 * shared with the client's event processing context.
320 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
331 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
343 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy an EVQ: dispatch to the family-specific qdestroy, then free
 * the efx_evq_t allocated in efx_ev_qcreate. Asserts at least one
 * queue exists; the en_ev_qcount decrement is in a line not visible
 * in this sampled extract.
 */
351 efx_nic_t *enp = eep->ee_enp;
352 const efx_ev_ops_t *eevop = enp->en_eevop;
354 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
356 EFSYS_ASSERT(enp->en_ev_qcount != 0);
359 eevop->eevo_qdestroy(eep);
361 /* Free the EVQ object */
362 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
/*
 * Re-arm the EVQ (write the read pointer so the next event raises an
 * interrupt). Fails early if the interrupt module is not initialised;
 * otherwise delegates to the family-specific eevo_qprime.
 */
365 __checkReturn efx_rc_t
368 __in unsigned int count)
370 efx_nic_t *enp = eep->ee_enp;
371 const efx_ev_ops_t *eevop = enp->en_eevop;
374 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
376 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
381 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
389 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_ev_qpending: read the qword at position (count & mask) in the
 * EVQ ring and report whether it holds a valid event (EFX_EV_PRESENT).
 */
393 __checkReturn boolean_t
396 __in unsigned int count)
401 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
403 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
404 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
406 return (EFX_EV_PRESENT(qword));
/* Optional prefetch of the same ring slot to warm the cache before polling. */
409 #if EFSYS_OPT_EV_PREFETCH
414 __in unsigned int count)
418 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
420 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
421 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
424 #endif /* EFSYS_OPT_EV_PREFETCH */
/*
 * efx_ev_qpoll: drain pending events, invoking the caller's callback
 * table (eecp) per event; countp is advanced past processed events.
 * Pure dispatch to the family-specific eevo_qpoll.
 */
429 __inout unsigned int *countp,
430 __in const efx_ev_callbacks_t *eecp,
433 efx_nic_t *enp = eep->ee_enp;
434 const efx_ev_ops_t *eevop = enp->en_eevop;
436 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
438 EFSYS_ASSERT(eevop != NULL &&
439 eevop->eevo_qpoll != NULL);
441 eevop->eevo_qpoll(eep, countp, eecp, arg);
/* efx_ev_qpost: inject a software (driver-generated) event into the queue. */
449 efx_nic_t *enp = eep->ee_enp;
450 const efx_ev_ops_t *eevop = enp->en_eevop;
452 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
454 EFSYS_ASSERT(eevop != NULL &&
455 eevop->eevo_qpost != NULL);
457 eevop->eevo_qpost(eep, data);
/*
 * Convert a moderation interval in microseconds to hardware timer
 * ticks using the per-NIC timer quantum (ns). Fails if the quantum is
 * unknown (zero); a non-zero request never rounds down to zero ticks.
 * NOTE(review): `us * 1000` could overflow unsigned int for very large
 * us — presumably bounded by enc_evq_timer_max_us upstream; confirm.
 */
460 __checkReturn efx_rc_t
461 efx_ev_usecs_to_ticks(
463 __in unsigned int us,
464 __out unsigned int *ticksp)
466 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
470 if (encp->enc_evq_timer_quantum_ns == 0) {
475 /* Convert microseconds to a timer tick count */
478 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
479 ticks = 1; /* Never round down to zero */
481 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
487 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_ev_qmoderate: set interrupt moderation. Rejected when the queue
 * was created with notifications disabled; else dispatched to the
 * family-specific eevo_qmoderate.
 */
491 __checkReturn efx_rc_t
494 __in unsigned int us)
496 efx_nic_t *enp = eep->ee_enp;
497 const efx_ev_ops_t *eevop = enp->en_eevop;
500 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
502 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
503 EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
508 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
516 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Accumulate this queue's event statistics into the caller-supplied
 * EV_NQSTATS-element array (compiled only under EFSYS_OPT_QSTATS);
 * dispatches to the family-specific eevo_qstats_update.
 */
522 efx_ev_qstats_update(
524 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
526 { efx_nic_t *enp = eep->ee_enp;
527 const efx_ev_ops_t *eevop = enp->en_eevop;
529 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
531 eevop->eevo_qstats_update(eep, stat);
534 #endif /* EFSYS_OPT_QSTATS */
538 static __checkReturn efx_rc_t
545 * Program the event queue for receive and transmit queue
548 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
549 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
550 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
556 static __checkReturn boolean_t
559 __in efx_qword_t *eqp,
562 __inout uint16_t *flagsp)
564 boolean_t ignore = B_FALSE;
566 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
567 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
568 EFSYS_PROBE(tobe_disc);
570 * Assume this is a unicast address mismatch, unless below
571 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
572 * EV_RX_PAUSE_FRM_ERR is set.
574 (*flagsp) |= EFX_ADDR_MISMATCH;
577 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
578 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
579 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
580 (*flagsp) |= EFX_DISCARD;
582 #if EFSYS_OPT_RX_SCATTER
584 * Lookout for payload queue ran dry errors and ignore them.
586 * Sadly for the header/data split cases, the descriptor
587 * pointer in this event refers to the header queue and
588 * therefore cannot be easily detected as duplicate.
589 * So we drop these and rely on the receive processing seeing
590 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
591 * the partially received packet.
593 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
594 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
595 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
597 #endif /* EFSYS_OPT_RX_SCATTER */
600 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
601 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
602 EFSYS_PROBE(crc_err);
603 (*flagsp) &= ~EFX_ADDR_MISMATCH;
604 (*flagsp) |= EFX_DISCARD;
607 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
608 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
609 EFSYS_PROBE(pause_frm_err);
610 (*flagsp) &= ~EFX_ADDR_MISMATCH;
611 (*flagsp) |= EFX_DISCARD;
614 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
615 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
616 EFSYS_PROBE(owner_id_err);
617 (*flagsp) |= EFX_DISCARD;
620 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
621 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
622 EFSYS_PROBE(ipv4_err);
623 (*flagsp) &= ~EFX_CKSUM_IPV4;
626 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
627 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
628 EFSYS_PROBE(udp_chk_err);
629 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
632 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
633 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
636 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
637 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
640 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
646 static __checkReturn boolean_t
649 __in efx_qword_t *eqp,
650 __in const efx_ev_callbacks_t *eecp,
657 #if EFSYS_OPT_RX_SCATTER
659 boolean_t jumbo_cont;
660 #endif /* EFSYS_OPT_RX_SCATTER */
665 boolean_t should_abort;
667 EFX_EV_QSTAT_INCR(eep, EV_RX);
669 /* Basic packet information */
670 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
671 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
672 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
673 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
675 #if EFSYS_OPT_RX_SCATTER
676 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
677 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
678 #endif /* EFSYS_OPT_RX_SCATTER */
680 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
682 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
685 * If packet is marked as OK and packet type is TCP/IP or
686 * UDP/IP or other IP, then we can rely on the hardware checksums.
689 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
690 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
692 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
693 flags |= EFX_PKT_IPV6;
695 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
696 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
700 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
701 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
703 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
704 flags |= EFX_PKT_IPV6;
706 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
707 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
711 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
713 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
714 flags = EFX_PKT_IPV6;
716 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
717 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
721 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
722 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
727 EFSYS_ASSERT(B_FALSE);
732 #if EFSYS_OPT_RX_SCATTER
733 /* Report scatter and header/lookahead split buffer flags */
735 flags |= EFX_PKT_START;
737 flags |= EFX_PKT_CONT;
738 #endif /* EFSYS_OPT_RX_SCATTER */
740 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
742 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
744 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
745 uint32_t, size, uint16_t, flags);
751 /* If we're not discarding the packet then it is ok */
752 if (~flags & EFX_DISCARD)
753 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
755 /* Detect multicast packets that didn't match the filter */
756 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
757 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
759 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
760 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
762 EFSYS_PROBE(mcast_mismatch);
763 flags |= EFX_ADDR_MISMATCH;
766 flags |= EFX_PKT_UNICAST;
770 * The packet parser in Siena can abort parsing packets under
771 * certain error conditions, setting the PKT_NOT_PARSED bit
772 * (which clears PKT_OK). If this is set, then don't trust
773 * the PKT_TYPE field.
778 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
780 flags |= EFX_CHECK_VLAN;
783 if (~flags & EFX_CHECK_VLAN) {
786 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
787 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
788 flags |= EFX_PKT_VLAN_TAGGED;
791 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
792 uint32_t, size, uint16_t, flags);
794 EFSYS_ASSERT(eecp->eec_rx != NULL);
795 should_abort = eecp->eec_rx(arg, label, id, size, flags);
797 return (should_abort);
800 static __checkReturn boolean_t
803 __in efx_qword_t *eqp,
804 __in const efx_ev_callbacks_t *eecp,
809 boolean_t should_abort;
811 EFX_EV_QSTAT_INCR(eep, EV_TX);
813 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
814 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
815 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
816 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
818 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
819 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
821 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
823 EFSYS_ASSERT(eecp->eec_tx != NULL);
824 should_abort = eecp->eec_tx(arg, label, id);
826 return (should_abort);
829 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
830 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
831 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
832 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
834 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
835 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
837 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
838 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
840 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
841 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
843 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
847 static __checkReturn boolean_t
850 __in efx_qword_t *eqp,
851 __in const efx_ev_callbacks_t *eecp,
854 _NOTE(ARGUNUSED(eqp, eecp, arg))
856 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
861 static __checkReturn boolean_t
864 __in efx_qword_t *eqp,
865 __in const efx_ev_callbacks_t *eecp,
868 boolean_t should_abort;
870 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
871 should_abort = B_FALSE;
873 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
874 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
877 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
879 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
881 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
883 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
884 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
888 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
892 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
893 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
895 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
896 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
899 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
901 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
903 should_abort = eecp->eec_rxq_flush_failed(arg,
906 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
908 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
910 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
915 case FSE_AZ_EVQ_INIT_DONE_EV:
916 EFSYS_ASSERT(eecp->eec_initialized != NULL);
917 should_abort = eecp->eec_initialized(arg);
921 case FSE_AZ_EVQ_NOT_EN_EV:
922 EFSYS_PROBE(evq_not_en);
925 case FSE_AZ_SRM_UPD_DONE_EV: {
928 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
930 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
932 EFSYS_ASSERT(eecp->eec_sram != NULL);
933 should_abort = eecp->eec_sram(arg, code);
937 case FSE_AZ_WAKE_UP_EV: {
940 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
942 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
943 should_abort = eecp->eec_wake_up(arg, id);
947 case FSE_AZ_TX_PKT_NON_TCP_UDP:
948 EFSYS_PROBE(tx_pkt_non_tcp_udp);
951 case FSE_AZ_TIMER_EV: {
954 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
956 EFSYS_ASSERT(eecp->eec_timer != NULL);
957 should_abort = eecp->eec_timer(arg, id);
961 case FSE_AZ_RX_DSC_ERROR_EV:
962 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
964 EFSYS_PROBE(rx_dsc_error);
966 EFSYS_ASSERT(eecp->eec_exception != NULL);
967 should_abort = eecp->eec_exception(arg,
968 EFX_EXCEPTION_RX_DSC_ERROR, 0);
972 case FSE_AZ_TX_DSC_ERROR_EV:
973 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
975 EFSYS_PROBE(tx_dsc_error);
977 EFSYS_ASSERT(eecp->eec_exception != NULL);
978 should_abort = eecp->eec_exception(arg,
979 EFX_EXCEPTION_TX_DSC_ERROR, 0);
987 return (should_abort);
/*
 * Handle a driver-generated (software) event on Siena. The 32-bit
 * event payload must fit in 16 bits — larger values are reported as a
 * bad event (the early-return line is outside this sampled extract) —
 * otherwise the data is forwarded to the eec_software callback.
 * Returns the callback's abort indication.
 */
990 static __checkReturn boolean_t
993 __in efx_qword_t *eqp,
994 __in const efx_ev_callbacks_t *eecp,
998 boolean_t should_abort;
1000 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1002 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
1003 if (data >= ((uint32_t)1 << 16)) {
1004 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1005 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1006 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1010 EFSYS_ASSERT(eecp->eec_software != NULL);
1011 should_abort = eecp->eec_software(arg, (uint16_t)data);
1013 return (should_abort);
1018 static __checkReturn boolean_t
1020 __in efx_evq_t *eep,
1021 __in efx_qword_t *eqp,
1022 __in const efx_ev_callbacks_t *eecp,
1025 efx_nic_t *enp = eep->ee_enp;
1027 boolean_t should_abort = B_FALSE;
1029 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
1031 if (enp->en_family != EFX_FAMILY_SIENA)
1034 EFSYS_ASSERT(eecp->eec_link_change != NULL);
1035 EFSYS_ASSERT(eecp->eec_exception != NULL);
1036 #if EFSYS_OPT_MON_STATS
1037 EFSYS_ASSERT(eecp->eec_monitor != NULL);
1040 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
1042 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
1044 case MCDI_EVENT_CODE_BADSSERT:
1045 efx_mcdi_ev_death(enp, EINTR);
1048 case MCDI_EVENT_CODE_CMDDONE:
1049 efx_mcdi_ev_cpl(enp,
1050 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1051 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1052 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1055 case MCDI_EVENT_CODE_LINKCHANGE: {
1056 efx_link_mode_t link_mode;
1058 siena_phy_link_ev(enp, eqp, &link_mode);
1059 should_abort = eecp->eec_link_change(arg, link_mode);
1062 case MCDI_EVENT_CODE_SENSOREVT: {
1063 #if EFSYS_OPT_MON_STATS
1065 efx_mon_stat_value_t value;
1068 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
1069 should_abort = eecp->eec_monitor(arg, id, value);
1070 else if (rc == ENOTSUP) {
1071 should_abort = eecp->eec_exception(arg,
1072 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1073 MCDI_EV_FIELD(eqp, DATA));
1075 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1077 should_abort = B_FALSE;
1081 case MCDI_EVENT_CODE_SCHEDERR:
1082 /* Informational only */
1085 case MCDI_EVENT_CODE_REBOOT:
1086 efx_mcdi_ev_death(enp, EIO);
1089 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1090 #if EFSYS_OPT_MAC_STATS
1091 if (eecp->eec_mac_stats != NULL) {
1092 eecp->eec_mac_stats(arg,
1093 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1098 case MCDI_EVENT_CODE_FWALERT: {
1099 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1101 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1102 should_abort = eecp->eec_exception(arg,
1103 EFX_EXCEPTION_FWALERT_SRAM,
1104 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1106 should_abort = eecp->eec_exception(arg,
1107 EFX_EXCEPTION_UNKNOWN_FWALERT,
1108 MCDI_EV_FIELD(eqp, DATA));
1113 EFSYS_PROBE1(mc_pcol_error, int, code);
1118 return (should_abort);
1121 #endif /* EFSYS_OPT_MCDI */
/*
 * siena_ev_qprime: write the masked read pointer to the per-queue
 * FR_AZ_EVQ_RPTR register so the hardware re-arms event delivery.
 */
1123 static __checkReturn efx_rc_t
1125 __in efx_evq_t *eep,
1126 __in unsigned int count)
1128 efx_nic_t *enp = eep->ee_enp;
1132 rptr = count & eep->ee_mask;
1134 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1136 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
/*
 * siena_ev_qpost: build a DRV_GEN event carrying the 16-bit data and
 * write it to FR_AZ_DRV_EV_REG, targeting this queue by index.
 */
1144 __in efx_evq_t *eep,
1147 efx_nic_t *enp = eep->ee_enp;
1151 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1152 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1154 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1155 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1156 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1158 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
1161 static __checkReturn efx_rc_t
1163 __in efx_evq_t *eep,
1164 __in unsigned int us)
1166 efx_nic_t *enp = eep->ee_enp;
1167 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1168 unsigned int locked;
1172 if (us > encp->enc_evq_timer_max_us) {
1177 /* If the value is zero then disable the timer */
1179 EFX_POPULATE_DWORD_2(dword,
1180 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1181 FRF_CZ_TC_TIMER_VAL, 0);
1185 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
1188 EFSYS_ASSERT(ticks > 0);
1189 EFX_POPULATE_DWORD_2(dword,
1190 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1191 FRF_CZ_TC_TIMER_VAL, ticks - 1);
1194 locked = (eep->ee_index == 0) ? 1 : 0;
1196 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1197 eep->ee_index, &dword, locked);
1204 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1209 static __checkReturn efx_rc_t
1211 __in efx_nic_t *enp,
1212 __in unsigned int index,
1213 __in efsys_mem_t *esmp,
1217 __in uint32_t flags,
1218 __in efx_evq_t *eep)
1220 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1224 boolean_t notify_mode;
1226 _NOTE(ARGUNUSED(esmp))
1228 #if EFSYS_OPT_RX_SCALE
1229 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1230 index >= EFX_MAXRSS_LEGACY) {
1236 (1U << size) <= encp->enc_evq_max_nevs / encp->enc_evq_min_nevs;
1238 if ((1U << size) == (uint32_t)ndescs / encp->enc_evq_min_nevs)
1240 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1245 /* Set up the handler table */
1246 eep->ee_rx = siena_ev_rx;
1247 eep->ee_tx = siena_ev_tx;
1248 eep->ee_driver = siena_ev_driver;
1249 eep->ee_global = siena_ev_global;
1250 eep->ee_drv_gen = siena_ev_drv_gen;
1252 eep->ee_mcdi = siena_ev_mcdi;
1253 #endif /* EFSYS_OPT_MCDI */
1255 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
1256 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
1258 /* Set up the new event queue */
1259 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
1260 FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
1261 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1262 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1264 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1265 FRF_AZ_EVQ_BUF_BASE_ID, id);
1267 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1269 /* Set initial interrupt moderation */
1270 siena_ev_qmoderate(eep, us);
1276 #if EFSYS_OPT_RX_SCALE
1279 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1284 #endif /* EFSYS_OPT_SIENA */
1286 #if EFSYS_OPT_QSTATS
1288 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock ac223f7134058b4f */
1289 static const char * const __efx_ev_qstat_name[] = {
1296 "rx_buf_owner_id_err",
1297 "rx_ipv4_hdr_chksum_err",
1298 "rx_tcp_udp_chksum_err",
1302 "rx_mcast_hash_match",
1319 "driver_srm_upd_done",
1320 "driver_tx_descq_fls_done",
1321 "driver_rx_descq_fls_done",
1322 "driver_rx_descq_fls_failed",
1323 "driver_rx_dsc_error",
1324 "driver_tx_dsc_error",
1327 "rx_parse_incomplete",
1329 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
1333 __in efx_nic_t *enp,
1334 __in unsigned int id)
1336 _NOTE(ARGUNUSED(enp))
1338 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1339 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1341 return (__efx_ev_qstat_name[id]);
1343 #endif /* EFSYS_OPT_NAMES */
1344 #endif /* EFSYS_OPT_QSTATS */
/*
 * siena_ev_qstats_update: fold each per-queue software counter into
 * the caller's stats array and reset the local counter to zero.
 */
1348 #if EFSYS_OPT_QSTATS
1350 siena_ev_qstats_update(
1351 __in efx_evq_t *eep,
1352 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1356 for (id = 0; id < EV_NQSTATS; id++) {
1357 efsys_stat_t *essp = &stat[id];
1359 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1360 eep->ee_stat[id] = 0;
1363 #endif /* EFSYS_OPT_QSTATS */
/*
 * siena_ev_qdestroy: disable the queue by zeroing its EVQ pointer
 * table entry and its timer table entry.
 */
1367 __in efx_evq_t *eep)
1369 efx_nic_t *enp = eep->ee_enp;
1372 /* Purge event queue */
1373 EFX_ZERO_OWORD(oword);
1375 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1376 eep->ee_index, &oword, B_TRUE);
1378 EFX_ZERO_OWORD(oword);
1379 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
/* siena_ev_fini: no per-family teardown needed; parameter unused. */
1384 __in efx_nic_t *enp)
1386 _NOTE(ARGUNUSED(enp))
1389 #endif /* EFSYS_OPT_SIENA */
1391 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA
1393 #define EFX_EV_BATCH 8
1396 siena_ef10_ev_qpoll(
1397 __in efx_evq_t *eep,
1398 __inout unsigned int *countp,
1399 __in const efx_ev_callbacks_t *eecp,
1402 efx_qword_t ev[EFX_EV_BATCH];
1409 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
1410 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
1411 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
1413 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
1414 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
1415 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
1416 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
1417 FSE_AZ_EV_CODE_DRV_GEN_EV);
1419 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
1420 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
1423 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
1424 EFSYS_ASSERT(countp != NULL);
1425 EFSYS_ASSERT(eecp != NULL);
1429 /* Read up until the end of the batch period */
1430 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
1431 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1432 for (total = 0; total < batch; ++total) {
1433 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
1435 if (!EFX_EV_PRESENT(ev[total]))
1438 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
1439 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
1440 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
1442 offset += sizeof (efx_qword_t);
1445 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
1447 * Prefetch the next batch when we get within PREFETCH_PERIOD
1448 * of a completed batch. If the batch is smaller, then prefetch
1451 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
1452 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1453 #endif /* EFSYS_OPT_EV_PREFETCH */
1455 /* Process the batch of events */
1456 for (index = 0; index < total; ++index) {
1457 boolean_t should_abort;
1460 #if EFSYS_OPT_EV_PREFETCH
1461 /* Prefetch if we've now reached the batch period */
1462 if (total == batch &&
1463 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
1464 offset = (count + batch) & eep->ee_mask;
1465 offset *= sizeof (efx_qword_t);
1467 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1469 #endif /* EFSYS_OPT_EV_PREFETCH */
1471 EFX_EV_QSTAT_INCR(eep, EV_ALL);
1473 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
1475 case FSE_AZ_EV_CODE_RX_EV:
1476 should_abort = eep->ee_rx(eep,
1477 &(ev[index]), eecp, arg);
1479 case FSE_AZ_EV_CODE_TX_EV:
1480 should_abort = eep->ee_tx(eep,
1481 &(ev[index]), eecp, arg);
1483 case FSE_AZ_EV_CODE_DRIVER_EV:
1484 should_abort = eep->ee_driver(eep,
1485 &(ev[index]), eecp, arg);
1487 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1488 should_abort = eep->ee_drv_gen(eep,
1489 &(ev[index]), eecp, arg);
1492 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
1493 should_abort = eep->ee_mcdi(eep,
1494 &(ev[index]), eecp, arg);
1497 case FSE_AZ_EV_CODE_GLOBAL_EV:
1498 if (eep->ee_global) {
1499 should_abort = eep->ee_global(eep,
1500 &(ev[index]), eecp, arg);
1503 /* else fallthrough */
1505 EFSYS_PROBE3(bad_event,
1506 unsigned int, eep->ee_index,
1508 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
1510 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
1512 EFSYS_ASSERT(eecp->eec_exception != NULL);
1513 (void) eecp->eec_exception(arg,
1514 EFX_EXCEPTION_EV_ERROR, code);
1515 should_abort = B_TRUE;
1518 /* Ignore subsequent events */
1522 * Poison batch to ensure the outer
1523 * loop is broken out of.
1525 EFSYS_ASSERT(batch <= EFX_EV_BATCH);
1526 batch += (EFX_EV_BATCH << 1);
1527 EFSYS_ASSERT(total != batch);
1533 * Now that the hardware has most likely moved onto dma'ing
1534 * into the next cache line, clear the processed events. Take
1535 * care to only clear out events that we've processed
1537 EFX_SET_QWORD(ev[0]);
1538 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1539 for (index = 0; index < total; ++index) {
1540 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
1541 offset += sizeof (efx_qword_t);
1546 } while (total == batch);
1551 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */