1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2007-2019 Solarflare Communications Inc.
/*
 * An event-queue entry is considered present only when BOTH 32-bit halves
 * of the 8-byte event descriptor differ from 0xffffffff: cleared/unwritten
 * slots are filled with all-ones (see the EFX_SET_QWORD clearing in the
 * poll loop below).
 */
13 #define EFX_EV_PRESENT(_qword) \
14 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
15 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
/*
 * Forward declarations of the per-family (Siena / EF10) event-queue
 * method implementations referenced by the ops tables below.
 * NOTE(review): this span appears fragmented — several prototype lines
 * (function names, trailing parameters) are missing from view.
 */
21 static __checkReturn efx_rc_t
29 static __checkReturn efx_rc_t
32 __in unsigned int index,
33 __in efsys_mem_t *esmp,
44 static __checkReturn efx_rc_t
47 __in unsigned int count);
54 static __checkReturn efx_rc_t
57 __in unsigned int us);
61 siena_ev_qstats_update(
63 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
67 #endif /* EFSYS_OPT_SIENA */
/* The qpoll implementation is shared between Siena and EF10 families. */
69 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA
74 __inout unsigned int *countp,
75 __in const efx_ev_callbacks_t *eecp,
78 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */
/*
 * Event-queue method table for the Siena family. Note that eevo_qpoll is
 * the shared siena_ef10_ev_qpoll implementation (event codes match across
 * families; see the static asserts in that function).
 */
81 static const efx_ev_ops_t __efx_ev_siena_ops = {
82 siena_ev_init, /* eevo_init */
83 siena_ev_fini, /* eevo_fini */
84 siena_ev_qcreate, /* eevo_qcreate */
85 siena_ev_qdestroy, /* eevo_qdestroy */
86 siena_ev_qprime, /* eevo_qprime */
87 siena_ev_qpost, /* eevo_qpost */
88 siena_ef10_ev_qpoll, /* eevo_qpoll */
89 siena_ev_qmoderate, /* eevo_qmoderate */
91 siena_ev_qstats_update, /* eevo_qstats_update */
94 #endif /* EFSYS_OPT_SIENA */
/*
 * Event-queue method table for EF10-architecture NICs (Huntington,
 * Medford, Medford2). The qpoll entry reuses the Siena implementation
 * because the event layout/codes are compatible.
 */
97 static const efx_ev_ops_t __efx_ev_ef10_ops = {
98 ef10_ev_init, /* eevo_init */
99 ef10_ev_fini, /* eevo_fini */
100 ef10_ev_qcreate, /* eevo_qcreate */
101 ef10_ev_qdestroy, /* eevo_qdestroy */
102 ef10_ev_qprime, /* eevo_qprime */
103 ef10_ev_qpost, /* eevo_qpost */
104 siena_ef10_ev_qpoll, /* eevo_qpoll */
105 ef10_ev_qmoderate, /* eevo_qmoderate */
107 ef10_ev_qstats_update, /* eevo_qstats_update */
110 #endif /* EFX_OPTS_EF10() */
/*
 * Initialize the event module: select the family-specific ops table,
 * call its eevo_init hook, and mark EFX_MOD_EV in en_mod_flags.
 * Requires interrupts to be initialized first (EFX_MOD_INTR asserted).
 * On failure the ops pointer and module flag are rolled back.
 * NOTE(review): body is fragmented here — default case of the family
 * switch and the error-label structure are not visible.
 */
113 __checkReturn efx_rc_t
117 const efx_ev_ops_t *eevop;
120 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
121 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
/* Double initialization is an error. */
123 if (enp->en_mod_flags & EFX_MOD_EV) {
128 switch (enp->en_family) {
130 case EFX_FAMILY_SIENA:
131 eevop = &__efx_ev_siena_ops;
133 #endif /* EFSYS_OPT_SIENA */
135 #if EFSYS_OPT_HUNTINGTON
136 case EFX_FAMILY_HUNTINGTON:
137 eevop = &__efx_ev_ef10_ops;
139 #endif /* EFSYS_OPT_HUNTINGTON */
141 #if EFSYS_OPT_MEDFORD
142 case EFX_FAMILY_MEDFORD:
143 eevop = &__efx_ev_ef10_ops;
145 #endif /* EFSYS_OPT_MEDFORD */
147 #if EFSYS_OPT_MEDFORD2
148 case EFX_FAMILY_MEDFORD2:
149 eevop = &__efx_ev_ef10_ops;
151 #endif /* EFSYS_OPT_MEDFORD2 */
/* No queues may exist before the module is initialized. */
159 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
161 if ((rc = eevop->eevo_init(enp)) != 0)
164 enp->en_eevop = eevop;
165 enp->en_mod_flags |= EFX_MOD_EV;
172 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Failure path: undo any partial module registration. */
174 enp->en_eevop = NULL;
175 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * Return the event queue size in bytes for the given number of
 * descriptors: ndescs * per-descriptor size from the NIC config.
 * NOTE(review): the function-name line is missing from this view.
 */
181 __in const efx_nic_t *enp,
182 __in unsigned int ndescs)
184 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
186 return (ndescs * encp->enc_ev_desc_size);
/*
 * Return the number of EFX_BUF_SIZE buffers needed to back an event
 * queue of ndescs descriptors (byte size rounded up to whole buffers).
 * NOTE(review): the function-name line is missing from this view.
 */
189 __checkReturn unsigned int
191 __in const efx_nic_t *enp,
192 __in unsigned int ndescs)
194 return (EFX_DIV_ROUND_UP(efx_evq_size(enp, ndescs), EFX_BUF_SIZE));
/*
 * Tear down the event module. Preconditions (asserted): INTR and EV
 * modules initialized, RX/TX modules already finalized, and no event
 * queues still allocated. Clears the ops pointer and EFX_MOD_EV flag.
 */
201 const efx_ev_ops_t *eevop = enp->en_eevop;
203 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
204 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
205 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
206 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
207 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
208 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
210 eevop->eevo_fini(enp);
212 enp->en_eevop = NULL;
213 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * Create an event queue: validate index / timer / notify-flag / size
 * parameters against the NIC config, allocate the efx_evq_t, publish it
 * through *eepp, then call the family-specific eevo_qcreate.
 * NOTE(review): body is fragmented — fail labels, some validation arms
 * and the qcount accounting are not visible in this view.
 */
217 __checkReturn efx_rc_t
220 __in unsigned int index,
221 __in efsys_mem_t *esmp,
226 __deref_out efx_evq_t **eepp)
228 const efx_ev_ops_t *eevop = enp->en_eevop;
230 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
233 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
234 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
236 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
237 enp->en_nic_cfg.enc_evq_limit);
/* Reject queue indices and moderation values beyond NIC limits. */
239 if (index >= encp->enc_evq_limit) {
244 if (us > encp->enc_evq_timer_max_us) {
/* Only the two explicit notify modes are accepted. */
249 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
250 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
252 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
/* Queue depth must be a power of two within [min_nevs, max_nevs]. */
263 EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs));
264 EFSYS_ASSERT(ISP2(encp->enc_evq_min_nevs));
267 ndescs < encp->enc_evq_min_nevs ||
268 ndescs > encp->enc_evq_max_nevs) {
273 /* Allocate an EVQ object */
274 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
280 eep->ee_magic = EFX_EVQ_MAGIC;
282 eep->ee_index = index;
/* ndescs is a power of two, so (ndescs - 1) is a valid index mask. */
283 eep->ee_mask = ndescs - 1;
284 eep->ee_flags = flags;
288 * Set outputs before the queue is created because interrupts may be
289 * raised for events immediately after the queue is created, before the
290 * function call below returns. See bug58606.
292 * The eepp pointer passed in by the client must therefore point to data
293 * shared with the client's event processing context.
298 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
/* Failure path: free the EVQ object allocated above. */
309 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
321 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy an event queue: invoke the family-specific eevo_qdestroy and
 * free the efx_evq_t allocated in efx_ev_qcreate().
 */
329 efx_nic_t *enp = eep->ee_enp;
330 const efx_ev_ops_t *eevop = enp->en_eevop;
332 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
334 EFSYS_ASSERT(enp->en_ev_qcount != 0);
337 eevop->eevo_qdestroy(eep);
339 /* Free the EVQ object */
340 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
/*
 * Re-prime (re-arm) the event queue so new events raise notifications.
 * Requires the interrupt module to be initialized; delegates to the
 * family-specific eevo_qprime with the current read pointer 'count'.
 */
343 __checkReturn efx_rc_t
346 __in unsigned int count)
348 efx_nic_t *enp = eep->ee_enp;
349 const efx_ev_ops_t *eevop = enp->en_eevop;
352 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
354 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
359 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
367 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Return B_TRUE when the event slot at position 'count' (masked into the
 * ring) holds an unprocessed event, judged by EFX_EV_PRESENT on the raw
 * qword read from the queue's DMA memory.
 */
371 __checkReturn boolean_t
374 __in unsigned int count)
379 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
381 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
382 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
384 return (EFX_EV_PRESENT(qword));
387 #if EFSYS_OPT_EV_PREFETCH
/*
 * Prefetch the cache line holding the event at ring position 'count'
 * to reduce latency of the subsequent read in the poll loop.
 */
392 __in unsigned int count)
396 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
398 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
399 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
402 #endif /* EFSYS_OPT_EV_PREFETCH */
/*
 * Poll the event queue: dispatch pending events to the client callbacks
 * in eecp, updating *countp (the ring position) as events are consumed.
 * Thin wrapper over the family-specific eevo_qpoll.
 */
407 __inout unsigned int *countp,
408 __in const efx_ev_callbacks_t *eecp,
411 efx_nic_t *enp = eep->ee_enp;
412 const efx_ev_ops_t *eevop = enp->en_eevop;
414 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
416 EFSYS_ASSERT(eevop != NULL &&
417 eevop->eevo_qpoll != NULL);
419 eevop->eevo_qpoll(eep, countp, eecp, arg);
/*
 * Post a software (driver-generated) event carrying 'data' onto the
 * queue. Delegates to the family-specific eevo_qpost.
 */
427 efx_nic_t *enp = eep->ee_enp;
428 const efx_ev_ops_t *eevop = enp->en_eevop;
430 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
432 EFSYS_ASSERT(eevop != NULL &&
433 eevop->eevo_qpost != NULL);
435 eevop->eevo_qpost(eep, data);
/*
 * Convert a moderation interval in microseconds to hardware timer ticks
 * using enc_evq_timer_quantum_ns. A zero quantum means the conversion is
 * unsupported and is treated as an error. Nonzero 'us' values smaller
 * than one quantum are rounded up to 1 tick so moderation is never
 * silently disabled by rounding down to zero.
 */
438 __checkReturn efx_rc_t
439 efx_ev_usecs_to_ticks(
441 __in unsigned int us,
442 __out unsigned int *ticksp)
444 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
448 if (encp->enc_evq_timer_quantum_ns == 0) {
453 /* Convert microseconds to a timer tick count */
456 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
457 ticks = 1; /* Never round down to zero */
459 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
465 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Set interrupt moderation for the queue to 'us' microseconds.
 * Moderation is meaningless on a queue created with notifications
 * disabled, so that combination is rejected before delegating to the
 * family-specific eevo_qmoderate.
 */
469 __checkReturn efx_rc_t
472 __in unsigned int us)
474 efx_nic_t *enp = eep->ee_enp;
475 const efx_ev_ops_t *eevop = enp->en_eevop;
478 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
480 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
481 EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
486 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
494 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Accumulate this queue's event statistics into the caller-provided
 * EV_NQSTATS-element array via the family-specific eevo_qstats_update.
 * (Only compiled when EFSYS_OPT_QSTATS is enabled — see #endif below.)
 */
500 efx_ev_qstats_update(
502 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
504 { efx_nic_t *enp = eep->ee_enp;
505 const efx_ev_ops_t *eevop = enp->en_eevop;
507 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
509 eevop->eevo_qstats_update(eep, stat);
512 #endif /* EFSYS_OPT_QSTATS */
/*
 * Siena event-module init: program FR_AZ_DP_CTRL_REG, clearing the
 * FRF_AZ_FLS_EVQ_ID field (event queue used for flush events).
 * NOTE(review): function name/signature lines are missing from view —
 * presumably this is siena_ev_init; confirm against full source.
 */
516 static __checkReturn efx_rc_t
523 * Program the event queue for receive and transmit queue
526 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
527 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
528 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
/*
 * Decode the error bits of an RX completion event whose PKT_OK bit was
 * clear. Updates *flagsp (EFX_DISCARD, EFX_ADDR_MISMATCH, checksum
 * flags) and per-error statistics; returns whether the event should be
 * ignored entirely (e.g. scatter payload-queue-ran-dry duplicates).
 */
534 static __checkReturn boolean_t
537 __in efx_qword_t *eqp,
540 __inout uint16_t *flagsp)
542 boolean_t ignore = B_FALSE;
544 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
545 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
546 EFSYS_PROBE(tobe_disc);
548 * Assume this is a unicast address mismatch, unless below
549 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
550 * EV_RX_PAUSE_FRM_ERR is set.
552 (*flagsp) |= EFX_ADDR_MISMATCH;
555 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
556 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
557 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
558 (*flagsp) |= EFX_DISCARD;
560 #if EFSYS_OPT_RX_SCATTER
562 * Lookout for payload queue ran dry errors and ignore them.
564 * Sadly for the header/data split cases, the descriptor
565 * pointer in this event refers to the header queue and
566 * therefore cannot be easily detected as duplicate.
567 * So we drop these and rely on the receive processing seeing
568 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
569 * the partially received packet.
571 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
572 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
573 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
575 #endif /* EFSYS_OPT_RX_SCATTER */
/* CRC error overrides the address-mismatch assumption above. */
578 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
579 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
580 EFSYS_PROBE(crc_err);
581 (*flagsp) &= ~EFX_ADDR_MISMATCH;
582 (*flagsp) |= EFX_DISCARD;
/* As does a pause-frame error. */
585 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
586 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
587 EFSYS_PROBE(pause_frm_err);
588 (*flagsp) &= ~EFX_ADDR_MISMATCH;
589 (*flagsp) |= EFX_DISCARD;
592 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
593 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
594 EFSYS_PROBE(owner_id_err);
595 (*flagsp) |= EFX_DISCARD;
/* Checksum errors only clear the corresponding checksum-valid flag. */
598 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
599 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
600 EFSYS_PROBE(ipv4_err);
601 (*flagsp) &= ~EFX_CKSUM_IPV4;
604 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
605 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
606 EFSYS_PROBE(udp_chk_err);
607 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
610 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
611 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
614 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
615 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
618 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
/*
 * Handle an RX completion event: extract descriptor id, byte count,
 * queue label and header type; derive packet-type/checksum flags; fold
 * in error decoding via siena_ev_rx_not_ok() when PKT_OK is clear; then
 * invoke the client's eec_rx callback. Returns the callback's abort
 * indication.
 */
624 static __checkReturn boolean_t
627 __in efx_qword_t *eqp,
628 __in const efx_ev_callbacks_t *eecp,
635 #if EFSYS_OPT_RX_SCATTER
637 boolean_t jumbo_cont;
638 #endif /* EFSYS_OPT_RX_SCATTER */
643 boolean_t should_abort;
645 EFX_EV_QSTAT_INCR(eep, EV_RX);
647 /* Basic packet information */
648 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
649 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
650 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
651 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
653 #if EFSYS_OPT_RX_SCATTER
654 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
655 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
656 #endif /* EFSYS_OPT_RX_SCATTER */
658 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
660 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
663 * If packet is marked as OK and packet type is TCP/IP or
664 * UDP/IP or other IP, then we can rely on the hardware checksums.
667 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
668 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
670 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
671 flags |= EFX_PKT_IPV6;
673 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
674 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
678 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
679 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
681 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
682 flags |= EFX_PKT_IPV6;
684 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
685 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
689 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
691 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
692 flags = EFX_PKT_IPV6;
694 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
695 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
699 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
700 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
/* Unknown header type: should be unreachable. */
705 EFSYS_ASSERT(B_FALSE);
710 #if EFSYS_OPT_RX_SCATTER
711 /* Report scatter and header/lookahead split buffer flags */
713 flags |= EFX_PKT_START;
715 flags |= EFX_PKT_CONT;
716 #endif /* EFSYS_OPT_RX_SCATTER */
718 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
720 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
722 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
723 uint32_t, size, uint16_t, flags);
729 /* If we're not discarding the packet then it is ok */
/* (~flags & EFX_DISCARD) is nonzero iff the DISCARD bit is clear. */
730 if (~flags & EFX_DISCARD)
731 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
733 /* Detect multicast packets that didn't match the filter */
734 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
735 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
737 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
738 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
740 EFSYS_PROBE(mcast_mismatch);
741 flags |= EFX_ADDR_MISMATCH;
744 flags |= EFX_PKT_UNICAST;
748 * The packet parser in Siena can abort parsing packets under
749 * certain error conditions, setting the PKT_NOT_PARSED bit
750 * (which clears PKT_OK). If this is set, then don't trust
751 * the PKT_TYPE field.
756 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
758 flags |= EFX_CHECK_VLAN;
/* PKT_TYPE is only trustworthy when EFX_CHECK_VLAN was not set. */
761 if (~flags & EFX_CHECK_VLAN) {
764 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
765 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
766 flags |= EFX_PKT_VLAN_TAGGED;
769 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
770 uint32_t, size, uint16_t, flags);
772 EFSYS_ASSERT(eecp->eec_rx != NULL);
773 should_abort = eecp->eec_rx(arg, label, id, size, flags);
775 return (should_abort);
/*
 * Handle a TX completion event. The common case (COMP set, no error
 * bits) invokes the client's eec_tx callback with descriptor id and
 * queue label. Otherwise the individual error bits are counted; the
 * final EV_TX_UNEXPECTED increment covers events with none of the known
 * bits set.
 */
778 static __checkReturn boolean_t
781 __in efx_qword_t *eqp,
782 __in const efx_ev_callbacks_t *eecp,
787 boolean_t should_abort;
789 EFX_EV_QSTAT_INCR(eep, EV_TX);
791 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
792 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
793 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
794 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
796 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
797 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
799 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
801 EFSYS_ASSERT(eecp->eec_tx != NULL);
802 should_abort = eecp->eec_tx(arg, label, id);
804 return (should_abort);
/* Error path: a completion with error bits set is a bad event. */
807 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
808 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
809 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
810 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
812 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
813 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
815 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
816 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
818 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
819 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
821 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
/*
 * Handle a global event: only counted in EV_GLOBAL statistics; the
 * event payload and callbacks are unused here.
 */
825 static __checkReturn boolean_t
828 __in efx_qword_t *eqp,
829 __in const efx_ev_callbacks_t *eecp,
832 _NOTE(ARGUNUSED(eqp, eecp, arg))
834 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
/*
 * Handle a driver event, dispatching on FSF_AZ_DRIVER_EV_SUBCODE:
 * TX/RX flush completions, EVQ init-done, SRAM update, wake-up, timer
 * and descriptor-error subcodes each forward to the matching client
 * callback. Returns the callback's abort indication (default B_FALSE).
 */
839 static __checkReturn boolean_t
842 __in efx_qword_t *eqp,
843 __in const efx_ev_callbacks_t *eecp,
846 boolean_t should_abort;
848 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
849 should_abort = B_FALSE;
851 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
852 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
855 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
857 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
859 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
861 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
862 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
866 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
/* RX flush carries both the queue id and a failed indication. */
870 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
871 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
873 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
874 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
877 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
879 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
881 should_abort = eecp->eec_rxq_flush_failed(arg,
884 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
886 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
888 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
893 case FSE_AZ_EVQ_INIT_DONE_EV:
894 EFSYS_ASSERT(eecp->eec_initialized != NULL);
895 should_abort = eecp->eec_initialized(arg);
899 case FSE_AZ_EVQ_NOT_EN_EV:
900 EFSYS_PROBE(evq_not_en);
903 case FSE_AZ_SRM_UPD_DONE_EV: {
906 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
908 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
910 EFSYS_ASSERT(eecp->eec_sram != NULL);
911 should_abort = eecp->eec_sram(arg, code);
915 case FSE_AZ_WAKE_UP_EV: {
918 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
920 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
921 should_abort = eecp->eec_wake_up(arg, id);
925 case FSE_AZ_TX_PKT_NON_TCP_UDP:
926 EFSYS_PROBE(tx_pkt_non_tcp_udp);
929 case FSE_AZ_TIMER_EV: {
932 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
934 EFSYS_ASSERT(eecp->eec_timer != NULL);
935 should_abort = eecp->eec_timer(arg, id);
/* Descriptor errors are surfaced as exceptions to the client. */
939 case FSE_AZ_RX_DSC_ERROR_EV:
940 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
942 EFSYS_PROBE(rx_dsc_error);
944 EFSYS_ASSERT(eecp->eec_exception != NULL);
945 should_abort = eecp->eec_exception(arg,
946 EFX_EXCEPTION_RX_DSC_ERROR, 0);
950 case FSE_AZ_TX_DSC_ERROR_EV:
951 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
953 EFSYS_PROBE(tx_dsc_error);
955 EFSYS_ASSERT(eecp->eec_exception != NULL);
956 should_abort = eecp->eec_exception(arg,
957 EFX_EXCEPTION_TX_DSC_ERROR, 0);
965 return (should_abort);
/*
 * Handle a driver-generated (software) event: the 32-bit payload must
 * fit in 16 bits (only 16 bits are delivered to eec_software); larger
 * values are reported as bad events.
 */
968 static __checkReturn boolean_t
971 __in efx_qword_t *eqp,
972 __in const efx_ev_callbacks_t *eecp,
976 boolean_t should_abort;
978 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
980 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
981 if (data >= ((uint32_t)1 << 16)) {
982 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
983 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
984 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
988 EFSYS_ASSERT(eecp->eec_software != NULL);
989 should_abort = eecp->eec_software(arg, (uint16_t)data);
991 return (should_abort);
/*
 * Handle an MCDI response event (Siena only). Dispatches on
 * MCDI_EVENT_CODE: command completion and reboot/assertion notices go
 * to the MCDI core (efx_mcdi_ev_cpl / efx_mcdi_ev_death); link changes,
 * sensor events, MAC-stats DMA and firmware alerts are forwarded to the
 * relevant client callbacks. Returns the abort indication.
 */
996 static __checkReturn boolean_t
999 __in efx_qword_t *eqp,
1000 __in const efx_ev_callbacks_t *eecp,
1003 efx_nic_t *enp = eep->ee_enp;
1005 boolean_t should_abort = B_FALSE;
1007 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
/* Defensive check mirroring the assert for non-debug builds. */
1009 if (enp->en_family != EFX_FAMILY_SIENA)
1012 EFSYS_ASSERT(eecp->eec_link_change != NULL);
1013 EFSYS_ASSERT(eecp->eec_exception != NULL);
1014 #if EFSYS_OPT_MON_STATS
1015 EFSYS_ASSERT(eecp->eec_monitor != NULL);
1018 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
1020 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
1022 case MCDI_EVENT_CODE_BADSSERT:
1023 efx_mcdi_ev_death(enp, EINTR);
1026 case MCDI_EVENT_CODE_CMDDONE:
1027 efx_mcdi_ev_cpl(enp,
1028 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1029 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1030 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1033 case MCDI_EVENT_CODE_LINKCHANGE: {
1034 efx_link_mode_t link_mode;
1036 siena_phy_link_ev(enp, eqp, &link_mode);
1037 should_abort = eecp->eec_link_change(arg, link_mode);
1040 case MCDI_EVENT_CODE_SENSOREVT: {
1041 #if EFSYS_OPT_MON_STATS
1043 efx_mon_stat_value_t value;
/* Unknown sensors surface as exceptions; wrong-port events ignored. */
1046 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
1047 should_abort = eecp->eec_monitor(arg, id, value);
1048 else if (rc == ENOTSUP) {
1049 should_abort = eecp->eec_exception(arg,
1050 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1051 MCDI_EV_FIELD(eqp, DATA));
1053 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1055 should_abort = B_FALSE;
1059 case MCDI_EVENT_CODE_SCHEDERR:
1060 /* Informational only */
1063 case MCDI_EVENT_CODE_REBOOT:
1064 efx_mcdi_ev_death(enp, EIO);
1067 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1068 #if EFSYS_OPT_MAC_STATS
1069 if (eecp->eec_mac_stats != NULL) {
1070 eecp->eec_mac_stats(arg,
1071 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1076 case MCDI_EVENT_CODE_FWALERT: {
1077 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1079 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1080 should_abort = eecp->eec_exception(arg,
1081 EFX_EXCEPTION_FWALERT_SRAM,
1082 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1084 should_abort = eecp->eec_exception(arg,
1085 EFX_EXCEPTION_UNKNOWN_FWALERT,
1086 MCDI_EV_FIELD(eqp, DATA));
/* Unrecognized MCDI event codes indicate a protocol mismatch. */
1091 EFSYS_PROBE1(mc_pcol_error, int, code);
1096 return (should_abort);
/*
 * Siena qprime: write the masked read pointer into the per-queue
 * FR_AZ_EVQ_RPTR_REG entry, re-arming event notifications.
 */
1101 static __checkReturn efx_rc_t
1103 __in efx_evq_t *eep,
1104 __in unsigned int count)
1106 efx_nic_t *enp = eep->ee_enp;
1110 rptr = count & eep->ee_mask;
1112 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1114 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
/*
 * Siena qpost: construct a DRV_GEN_EV event qword carrying 'data' and
 * write it to the FR_AZ_DRV_EV_REG targeting this queue's index.
 * NOTE(review): the function-name line is missing from this view.
 */
1122 __in efx_evq_t *eep,
1125 efx_nic_t *enp = eep->ee_enp;
1129 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1130 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1132 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1133 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1134 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1136 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
/*
 * Siena qmoderate: program the per-queue timer. us == 0 disables the
 * timer (FFE_CZ_TIMER_MODE_DIS); otherwise microseconds are converted
 * to ticks and the timer is set to interrupt-holdoff mode with value
 * (ticks - 1). Out-of-range values are rejected against
 * enc_evq_timer_max_us.
 */
1139 static __checkReturn efx_rc_t
1141 __in efx_evq_t *eep,
1142 __in unsigned int us)
1144 efx_nic_t *enp = eep->ee_enp;
1145 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1146 unsigned int locked;
1150 if (us > encp->enc_evq_timer_max_us) {
1155 /* If the value is zero then disable the timer */
1157 EFX_POPULATE_DWORD_2(dword,
1158 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1159 FRF_CZ_TC_TIMER_VAL, 0);
1163 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
/* usecs_to_ticks never returns 0 ticks for nonzero us. */
1166 EFSYS_ASSERT(ticks > 0);
1167 EFX_POPULATE_DWORD_2(dword,
1168 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1169 FRF_CZ_TC_TIMER_VAL, ticks - 1);
/* NOTE(review): 'locked' write flag is set only for queue 0 — confirm
 * semantics against the EFX_BAR_TBL_WRITED definition. */
1172 locked = (eep->ee_index == 0) ? 1 : 0;
1174 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1175 eep->ee_index, &dword, locked);
1182 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Siena qcreate: validate index against legacy-interrupt RSS limits and
 * buffer-table bounds, compute the encoded queue size, install the
 * per-event-code handler table, program the timer and EVQ pointer
 * tables, and apply the initial moderation setting.
 * NOTE(review): the size-search loop is fragmented here — the loop
 * header and some bounds checks are not fully visible.
 */
1187 static __checkReturn efx_rc_t
1189 __in efx_nic_t *enp,
1190 __in unsigned int index,
1191 __in efsys_mem_t *esmp,
1195 __in uint32_t flags,
1196 __in efx_evq_t *eep)
1198 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1202 boolean_t notify_mode;
1204 _NOTE(ARGUNUSED(esmp))
1206 #if EFSYS_OPT_RX_SCALE
/* With legacy line interrupts only EFX_MAXRSS_LEGACY queues exist. */
1207 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1208 index >= EFX_MAXRSS_LEGACY) {
/* Find the encoded size such that (1 << size) descriptors fit. */
1214 (1U << size) <= encp->enc_evq_max_nevs / encp->enc_evq_min_nevs;
1216 if ((1U << size) == (uint32_t)ndescs / encp->enc_evq_min_nevs)
1218 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1223 /* Set up the handler table */
1224 eep->ee_rx = siena_ev_rx;
1225 eep->ee_tx = siena_ev_tx;
1226 eep->ee_driver = siena_ev_driver;
1227 eep->ee_global = siena_ev_global;
1228 eep->ee_drv_gen = siena_ev_drv_gen;
1230 eep->ee_mcdi = siena_ev_mcdi;
1231 #endif /* EFSYS_OPT_MCDI */
/* Host notify mode is enabled for anything but INTERRUPT notify. */
1233 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
1234 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
1236 /* Set up the new event queue */
1237 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
1238 FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
1239 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1240 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1242 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1243 FRF_AZ_EVQ_BUF_BASE_ID, id);
1245 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1247 /* Set initial interrupt moderation */
1248 siena_ev_qmoderate(eep, us);
1254 #if EFSYS_OPT_RX_SCALE
1257 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1264 #if EFSYS_OPT_QSTATS
/*
 * Generated table of event-queue statistic names, indexed by the
 * EV_* qstat ids, plus the accessor that returns the name for an id.
 * Do not hand-edit the table contents: it is MKCONFIG-generated.
 */
1266 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock ac223f7134058b4f */
1267 static const char * const __efx_ev_qstat_name[] = {
1274 "rx_buf_owner_id_err",
1275 "rx_ipv4_hdr_chksum_err",
1276 "rx_tcp_udp_chksum_err",
1280 "rx_mcast_hash_match",
1297 "driver_srm_upd_done",
1298 "driver_tx_descq_fls_done",
1299 "driver_rx_descq_fls_done",
1300 "driver_rx_descq_fls_failed",
1301 "driver_rx_dsc_error",
1302 "driver_tx_dsc_error",
1305 "rx_parse_incomplete",
1307 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
/* Accessor: id must be < EV_NQSTATS (asserted); enp is unused. */
1311 __in efx_nic_t *enp,
1312 __in unsigned int id)
1314 _NOTE(ARGUNUSED(enp))
1316 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1317 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1319 return (__efx_ev_qstat_name[id]);
1321 #endif /* EFSYS_OPT_NAMES */
1322 #endif /* EFSYS_OPT_QSTATS */
1326 #if EFSYS_OPT_QSTATS
/*
 * Fold this queue's accumulated per-event statistics into the caller's
 * stat array, then reset the queue-local counters to zero.
 */
1328 siena_ev_qstats_update(
1329 __in efx_evq_t *eep,
1330 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1334 for (id = 0; id < EV_NQSTATS; id++) {
1335 efsys_stat_t *essp = &stat[id];
1337 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1338 eep->ee_stat[id] = 0;
1341 #endif /* EFSYS_OPT_QSTATS */
/*
 * Siena qdestroy: purge the queue by zeroing its EVQ pointer table
 * entry and its timer table entry.
 * NOTE(review): the function-name line is missing from this view.
 */
1345 __in efx_evq_t *eep)
1347 efx_nic_t *enp = eep->ee_enp;
1350 /* Purge event queue */
1351 EFX_ZERO_OWORD(oword);
1353 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1354 eep->ee_index, &oword, B_TRUE);
1356 EFX_ZERO_OWORD(oword);
1357 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
/*
 * Siena event-module fini: no per-family teardown is required; enp is
 * deliberately unused. NOTE(review): function-name line not visible.
 */
1362 __in efx_nic_t *enp)
1364 _NOTE(ARGUNUSED(enp))
1367 #endif /* EFSYS_OPT_SIENA */
1369 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA
/* Events are read and dispatched in batches of this size. */
1371 #define EFX_EV_BATCH 8
/*
 * Shared Siena/EF10 poll loop: read up to EFX_EV_BATCH present events
 * from the queue's DMA ring, dispatch each by its EV_CODE through the
 * per-queue handler table (ee_rx / ee_tx / ee_driver / ee_drv_gen /
 * ee_mcdi / ee_global), then clear the consumed slots to all-ones so
 * EFX_EV_PRESENT sees them as empty. A handler returning abort poisons
 * 'batch' to break out of the outer do/while. *countp tracks the ring
 * position across calls.
 */
1374 siena_ef10_ev_qpoll(
1375 __in efx_evq_t *eep,
1376 __inout unsigned int *countp,
1377 __in const efx_ev_callbacks_t *eecp,
1380 efx_qword_t ev[EFX_EV_BATCH];
1387 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
1388 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
1389 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
1391 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
1392 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
1393 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
1394 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
1395 FSE_AZ_EV_CODE_DRV_GEN_EV);
1397 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
1398 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
1401 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
1402 EFSYS_ASSERT(countp != NULL);
1403 EFSYS_ASSERT(eecp != NULL);
1407 /* Read up until the end of the batch period */
1408 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
1409 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1410 for (total = 0; total < batch; ++total) {
1411 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
/* Stop at the first slot hardware has not yet written. */
1413 if (!EFX_EV_PRESENT(ev[total]))
1416 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
1417 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
1418 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
1420 offset += sizeof (efx_qword_t);
1423 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
1425 * Prefetch the next batch when we get within PREFETCH_PERIOD
1426 * of a completed batch. If the batch is smaller, then prefetch
1429 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
1430 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1431 #endif /* EFSYS_OPT_EV_PREFETCH */
1433 /* Process the batch of events */
1434 for (index = 0; index < total; ++index) {
1435 boolean_t should_abort;
1438 #if EFSYS_OPT_EV_PREFETCH
1439 /* Prefetch if we've now reached the batch period */
1440 if (total == batch &&
1441 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
1442 offset = (count + batch) & eep->ee_mask;
1443 offset *= sizeof (efx_qword_t);
1445 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1447 #endif /* EFSYS_OPT_EV_PREFETCH */
1449 EFX_EV_QSTAT_INCR(eep, EV_ALL);
1451 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
1453 case FSE_AZ_EV_CODE_RX_EV:
1454 should_abort = eep->ee_rx(eep,
1455 &(ev[index]), eecp, arg);
1457 case FSE_AZ_EV_CODE_TX_EV:
1458 should_abort = eep->ee_tx(eep,
1459 &(ev[index]), eecp, arg);
1461 case FSE_AZ_EV_CODE_DRIVER_EV:
1462 should_abort = eep->ee_driver(eep,
1463 &(ev[index]), eecp, arg);
1465 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1466 should_abort = eep->ee_drv_gen(eep,
1467 &(ev[index]), eecp, arg);
1470 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
1471 should_abort = eep->ee_mcdi(eep,
1472 &(ev[index]), eecp, arg);
1475 case FSE_AZ_EV_CODE_GLOBAL_EV:
1476 if (eep->ee_global) {
1477 should_abort = eep->ee_global(eep,
1478 &(ev[index]), eecp, arg);
1481 /* else fallthrough */
/* Unknown event code: report as an EV_ERROR exception and abort. */
1483 EFSYS_PROBE3(bad_event,
1484 unsigned int, eep->ee_index,
1486 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
1488 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
1490 EFSYS_ASSERT(eecp->eec_exception != NULL);
1491 (void) eecp->eec_exception(arg,
1492 EFX_EXCEPTION_EV_ERROR, code);
1493 should_abort = B_TRUE;
1496 /* Ignore subsequent events */
1500 * Poison batch to ensure the outer
1501 * loop is broken out of.
1503 EFSYS_ASSERT(batch <= EFX_EV_BATCH);
1504 batch += (EFX_EV_BATCH << 1);
1505 EFSYS_ASSERT(total != batch);
1511 * Now that the hardware has most likely moved onto dma'ing
1512 * into the next cache line, clear the processed events. Take
1513 * care to only clear out events that we've processed
/* EFX_SET_QWORD writes all-ones — the "empty" pattern for
 * EFX_EV_PRESENT. */
1515 EFX_SET_QWORD(ev[0]);
1516 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1517 for (index = 0; index < total; ++index) {
1518 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
1519 offset += sizeof (efx_qword_t);
/* Continue while every slot in the batch period held an event. */
1524 } while (total == batch);
1529 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */