1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2007-2018 Solarflare Communications Inc.
/*
 * Bump a per-queue event statistic; the second, empty definition is the
 * stats-disabled variant.  NOTE(review): the #if EFSYS_OPT_QSTATS /
 * #else / #endif framing lines are elided from this listing.
 */
14 #define EFX_EV_QSTAT_INCR(_eep, _stat) \
16 (_eep)->ee_stat[_stat]++; \
17 _NOTE(CONSTANTCONDITION) \
20 #define EFX_EV_QSTAT_INCR(_eep, _stat)

/*
 * An event slot is occupied unless both 32-bit halves still hold the
 * all-ones "empty" pattern that qpoll writes back after processing.
 */
23 #define EFX_EV_PRESENT(_qword) \
24 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
25 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
/*
 * Forward declarations of the Siena implementations of the eevo_*
 * methods (bodies appear later in this file).  NOTE(review): several
 * declaration lines are elided from this listing.
 */
31 static __checkReturn efx_rc_t
39 static __checkReturn efx_rc_t
42 __in unsigned int index,
43 __in efsys_mem_t *esmp,
54 static __checkReturn efx_rc_t
57 __in unsigned int count);
64 static __checkReturn efx_rc_t
67 __in unsigned int us);
71 siena_ev_qstats_update(
73 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
77 #endif /* EFSYS_OPT_SIENA */
/*
 * Per-family event-queue method tables.  efx_ev_init() selects one of
 * these by enp->en_family; the Huntington/Medford families share the
 * EF10 table.  NOTE(review): the closing "};" lines and any additional
 * members (e.g. a qpoll method) are elided from this listing.
 */
80 static const efx_ev_ops_t __efx_ev_siena_ops = {
81 siena_ev_init, /* eevo_init */
82 siena_ev_fini, /* eevo_fini */
83 siena_ev_qcreate, /* eevo_qcreate */
84 siena_ev_qdestroy, /* eevo_qdestroy */
85 siena_ev_qprime, /* eevo_qprime */
86 siena_ev_qpost, /* eevo_qpost */
87 siena_ev_qmoderate, /* eevo_qmoderate */
89 siena_ev_qstats_update, /* eevo_qstats_update */
92 #endif /* EFSYS_OPT_SIENA */
94 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
95 static const efx_ev_ops_t __efx_ev_ef10_ops = {
96 ef10_ev_init, /* eevo_init */
97 ef10_ev_fini, /* eevo_fini */
98 ef10_ev_qcreate, /* eevo_qcreate */
99 ef10_ev_qdestroy, /* eevo_qdestroy */
100 ef10_ev_qprime, /* eevo_qprime */
101 ef10_ev_qpost, /* eevo_qpost */
102 ef10_ev_qmoderate, /* eevo_qmoderate */
104 ef10_ev_qstats_update, /* eevo_qstats_update */
107 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
/*
 * Initialise the event module: pick the method table matching the NIC
 * family, call its eevo_init, then record the table in en_eevop and set
 * EFX_MOD_EV.  Requires interrupts (EFX_MOD_INTR) already initialised;
 * re-initialisation (EFX_MOD_EV already set) is rejected.  On failure
 * the module state is cleared again.  NOTE(review): the function name
 * line, switch braces/breaks and fail labels are elided from this
 * listing.
 */
110 __checkReturn efx_rc_t
114 const efx_ev_ops_t *eevop;
117 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
118 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
120 if (enp->en_mod_flags & EFX_MOD_EV) {
125 switch (enp->en_family) {
127 case EFX_FAMILY_SIENA:
128 eevop = &__efx_ev_siena_ops;
130 #endif /* EFSYS_OPT_SIENA */
132 #if EFSYS_OPT_HUNTINGTON
133 case EFX_FAMILY_HUNTINGTON:
134 eevop = &__efx_ev_ef10_ops;
136 #endif /* EFSYS_OPT_HUNTINGTON */
138 #if EFSYS_OPT_MEDFORD
139 case EFX_FAMILY_MEDFORD:
140 eevop = &__efx_ev_ef10_ops;
142 #endif /* EFSYS_OPT_MEDFORD */
/* No queues may exist before the module is initialised. */
150 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
152 if ((rc = eevop->eevo_init(enp)) != 0)
155 enp->en_eevop = eevop;
156 enp->en_mod_flags |= EFX_MOD_EV;
163 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Failure: roll back module state. */
165 enp->en_eevop = NULL;
166 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * Tear down the event module.  Asserts that RX and TX are already shut
 * down and no event queues remain, then calls the family's eevo_fini
 * and clears en_eevop/EFX_MOD_EV.  NOTE(review): the efx_ev_fini()
 * signature line is elided from this listing.
 */
174 const efx_ev_ops_t *eevop = enp->en_eevop;
176 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
177 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
178 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
179 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
180 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
181 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
183 eevop->eevo_fini(enp);
185 enp->en_eevop = NULL;
186 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * Create an event queue: validate the notify flags, allocate the
 * efx_evq_t, fill in index/mask/flags and publish it via *eepp BEFORE
 * calling the family eevo_qcreate — events/interrupts may fire as soon
 * as the hardware queue exists (see the bug58606 note below).  On
 * failure the object is freed.  NOTE(review): the function name line,
 * ndescs power-of-two handling and fail labels are elided from this
 * listing.
 */
190 __checkReturn efx_rc_t
193 __in unsigned int index,
194 __in efsys_mem_t *esmp,
199 __deref_out efx_evq_t **eepp)
201 const efx_ev_ops_t *eevop = enp->en_eevop;
205 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
206 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
208 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
209 enp->en_nic_cfg.enc_evq_limit);
211 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
212 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
214 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
225 /* Allocate an EVQ object */
226 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
232 eep->ee_magic = EFX_EVQ_MAGIC;
234 eep->ee_index = index;
/* ee_mask assumes ndescs is a power of two (checked by eevo_qcreate). */
235 eep->ee_mask = ndescs - 1;
236 eep->ee_flags = flags;
240 * Set outputs before the queue is created because interrupts may be
241 * raised for events immediately after the queue is created, before the
242 * function call below returns. See bug58606.
244 * The eepp pointer passed in by the client must therefore point to data
245 * shared with the client's event processing context.
250 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
261 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
267 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy an event queue: delegate to the family eevo_qdestroy and free
 * the efx_evq_t.  NOTE(review): the efx_ev_qdestroy() signature line
 * and the en_ev_qcount decrement are elided from this listing.
 */
275 efx_nic_t *enp = eep->ee_enp;
276 const efx_ev_ops_t *eevop = enp->en_eevop;
278 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
280 EFSYS_ASSERT(enp->en_ev_qcount != 0);
283 eevop->eevo_qdestroy(eep);
285 /* Free the EVQ object */
286 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
/*
 * Re-arm ("prime") the event queue at read pointer 'count'.  Fails when
 * the interrupt module is not initialised; otherwise delegates to the
 * family eevo_qprime.  NOTE(review): the function name line, rc
 * declaration and fail labels are elided from this listing.
 */
289 __checkReturn efx_rc_t
292 __in unsigned int count)
294 efx_nic_t *enp = eep->ee_enp;
295 const efx_ev_ops_t *eevop = enp->en_eevop;
298 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
300 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
305 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
313 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Return B_TRUE if the event slot at position 'count' (masked into the
 * ring) holds an unprocessed event.  NOTE(review): the function name
 * line is elided — presumably efx_ev_qpending; confirm against the
 * full source.
 */
317 __checkReturn boolean_t
320 __in unsigned int count)
325 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
327 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
328 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
330 return (EFX_EV_PRESENT(qword));
333 #if EFSYS_OPT_EV_PREFETCH
/*
 * Prefetch the event-ring entry at position 'count' into cache ahead of
 * processing.  NOTE(review): the function signature lines are elided
 * from this listing.
 */
338 __in unsigned int count)
342 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
344 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
345 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
348 #endif /* EFSYS_OPT_EV_PREFETCH */
350 #define EFX_EV_BATCH 8

/*
 * Poll an event queue: read events in batches of up to EFX_EV_BATCH,
 * dispatch each to the per-type handler installed on the queue
 * (ee_rx/ee_tx/ee_driver/ee_drv_gen/ee_mcdi/ee_global), then overwrite
 * the processed slots with all-ones so EFX_EV_PRESENT() reports them
 * empty on the next pass.  Loops while full batches keep arriving or
 * until a handler requests abort.  The static asserts up front justify
 * decoding the EV_CODE field identically on Siena and EF10.
 * NOTE(review): the function signature, switch braces/breaks and the
 * count/abort bookkeeping lines are elided from this listing.
 */
355 __inout unsigned int *countp,
356 __in const efx_ev_callbacks_t *eecp,
359 efx_qword_t ev[EFX_EV_BATCH];
366 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
367 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
368 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
370 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
371 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
372 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
373 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
374 FSE_AZ_EV_CODE_DRV_GEN_EV);
376 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
377 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
380 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
381 EFSYS_ASSERT(countp != NULL);
382 EFSYS_ASSERT(eecp != NULL);
386 /* Read up until the end of the batch period */
387 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
388 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
389 for (total = 0; total < batch; ++total) {
390 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
/* Stop copying at the first empty (all-ones) slot. */
392 if (!EFX_EV_PRESENT(ev[total]))
395 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
396 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
397 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
399 offset += sizeof (efx_qword_t);
402 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
404 * Prefetch the next batch when we get within PREFETCH_PERIOD
405 * of a completed batch. If the batch is smaller, then prefetch
408 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
409 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
410 #endif /* EFSYS_OPT_EV_PREFETCH */
412 /* Process the batch of events */
413 for (index = 0; index < total; ++index) {
414 boolean_t should_abort;
417 #if EFSYS_OPT_EV_PREFETCH
418 /* Prefetch if we've now reached the batch period */
419 if (total == batch &&
420 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
421 offset = (count + batch) & eep->ee_mask;
422 offset *= sizeof (efx_qword_t);
424 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
426 #endif /* EFSYS_OPT_EV_PREFETCH */
428 EFX_EV_QSTAT_INCR(eep, EV_ALL);
/* Dispatch on the event code (identical layout on all families). */
430 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
432 case FSE_AZ_EV_CODE_RX_EV:
433 should_abort = eep->ee_rx(eep,
434 &(ev[index]), eecp, arg);
436 case FSE_AZ_EV_CODE_TX_EV:
437 should_abort = eep->ee_tx(eep,
438 &(ev[index]), eecp, arg);
440 case FSE_AZ_EV_CODE_DRIVER_EV:
441 should_abort = eep->ee_driver(eep,
442 &(ev[index]), eecp, arg);
444 case FSE_AZ_EV_CODE_DRV_GEN_EV:
445 should_abort = eep->ee_drv_gen(eep,
446 &(ev[index]), eecp, arg);
449 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
450 should_abort = eep->ee_mcdi(eep,
451 &(ev[index]), eecp, arg);
454 case FSE_AZ_EV_CODE_GLOBAL_EV:
455 if (eep->ee_global) {
456 should_abort = eep->ee_global(eep,
457 &(ev[index]), eecp, arg);
460 /* else fallthrough */
/* Unknown event code: report via the exception callback and abort. */
462 EFSYS_PROBE3(bad_event,
463 unsigned int, eep->ee_index,
465 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
467 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
469 EFSYS_ASSERT(eecp->eec_exception != NULL);
470 (void) eecp->eec_exception(arg,
471 EFX_EXCEPTION_EV_ERROR, code);
472 should_abort = B_TRUE;
475 /* Ignore subsequent events */
482 * Now that the hardware has most likely moved onto dma'ing
483 * into the next cache line, clear the processed events. Take
484 * care to only clear out events that we've processed
486 EFX_SET_QWORD(ev[0]);
487 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
488 for (index = 0; index < total; ++index) {
489 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
490 offset += sizeof (efx_qword_t);
/* A full batch suggests more events are queued — poll again. */
495 } while (total == batch);
/*
 * Post a software (driver-generated) event to the queue via the family
 * eevo_qpost method.  NOTE(review): the efx_ev_qpost() signature line
 * is elided from this listing.
 */
505 efx_nic_t *enp = eep->ee_enp;
506 const efx_ev_ops_t *eevop = enp->en_eevop;
508 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
510 EFSYS_ASSERT(eevop != NULL &&
511 eevop->eevo_qpost != NULL);
513 eevop->eevo_qpost(eep, data);
/*
 * Convert a moderation interval in microseconds to hardware timer ticks
 * using the NIC's timer quantum (enc_evq_timer_quantum_ns).  A non-zero
 * interval smaller than one quantum is rounded up to one tick so
 * moderation is never silently disabled.  NOTE(review): the enp
 * parameter line, the us == 0 branch and the *ticksp store/return are
 * elided from this listing.
 */
516 __checkReturn efx_rc_t
517 efx_ev_usecs_to_ticks(
519 __in unsigned int us,
520 __out unsigned int *ticksp)
522 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
525 /* Convert microseconds to a timer tick count */
528 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
529 ticks = 1; /* Never round down to zero */
531 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
/*
 * Set the interrupt moderation interval (in microseconds) for a queue.
 * Rejected for queues created with EFX_EVQ_FLAGS_NOTIFY_DISABLED;
 * otherwise delegates to the family eevo_qmoderate.  NOTE(review): the
 * function name line, rc declaration and fail labels are elided from
 * this listing.
 */
537 __checkReturn efx_rc_t
540 __in unsigned int us)
542 efx_nic_t *enp = eep->ee_enp;
543 const efx_ev_ops_t *eevop = enp->en_eevop;
546 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
548 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
549 EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
554 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
562 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fold the queue's accumulated event statistics into the caller's
 * stat array via the family eevo_qstats_update method
 * (EFSYS_OPT_QSTATS builds only).
 */
568 efx_ev_qstats_update(
570 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
572 { efx_nic_t *enp = eep->ee_enp;
573 const efx_ev_ops_t *eevop = enp->en_eevop;
575 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
577 eevop->eevo_qstats_update(eep, stat);
580 #endif /* EFSYS_OPT_QSTATS */
/*
 * Siena eevo_init: read-modify-write FR_AZ_DP_CTRL_REG to clear
 * FRF_AZ_FLS_EVQ_ID, programming the event queue used for RX/TX queue
 * flush completions to queue 0.  NOTE(review): the function name line
 * and return are elided from this listing.
 */
584 static __checkReturn efx_rc_t
591 * Program the event queue for receive and transmit queue
594 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
595 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
596 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
/*
 * Decode the error bits of an RX event whose PKT_OK flag is clear,
 * translating them into EFX_* packet flags in *flagsp (ADDR_MISMATCH,
 * DISCARD, checksum-flag clearing).  Returns 'ignore' = B_TRUE when the
 * event should be dropped entirely (scatter payload-queue-ran-dry
 * case).  NOTE(review): several closing braces and the
 * ignore = B_TRUE / return lines are elided from this listing.
 */
602 static __checkReturn boolean_t
605 __in efx_qword_t *eqp,
608 __inout uint16_t *flagsp)
610 boolean_t ignore = B_FALSE;
612 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
613 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
614 EFSYS_PROBE(tobe_disc);
616 * Assume this is a unicast address mismatch, unless below
617 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
618 * EV_RX_PAUSE_FRM_ERR is set.
620 (*flagsp) |= EFX_ADDR_MISMATCH;
623 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
624 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
625 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
626 (*flagsp) |= EFX_DISCARD;
628 #if EFSYS_OPT_RX_SCATTER
630 * Lookout for payload queue ran dry errors and ignore them.
632 * Sadly for the header/data split cases, the descriptor
633 * pointer in this event refers to the header queue and
634 * therefore cannot be easily detected as duplicate.
635 * So we drop these and rely on the receive processing seeing
636 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
637 * the partially received packet.
639 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
640 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
641 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
643 #endif /* EFSYS_OPT_RX_SCATTER */
/* CRC / pause-frame errors override the address-mismatch guess above. */
646 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
647 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
648 EFSYS_PROBE(crc_err);
649 (*flagsp) &= ~EFX_ADDR_MISMATCH;
650 (*flagsp) |= EFX_DISCARD;
653 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
654 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
655 EFSYS_PROBE(pause_frm_err);
656 (*flagsp) &= ~EFX_ADDR_MISMATCH;
657 (*flagsp) |= EFX_DISCARD;
660 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
661 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
662 EFSYS_PROBE(owner_id_err);
663 (*flagsp) |= EFX_DISCARD;
666 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
667 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
668 EFSYS_PROBE(ipv4_err);
669 (*flagsp) &= ~EFX_CKSUM_IPV4;
672 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
673 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
674 EFSYS_PROBE(udp_chk_err);
675 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
678 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
679 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
682 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
683 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
686 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
/*
 * Siena RX event handler: extract descriptor id, byte count, queue
 * label and packet-OK indication, derive EFX_PKT_*/EFX_CKSUM_* flags
 * from the header-type field, fold in error decoding via
 * siena_ev_rx_not_ok() when PKT_OK is clear, detect multicast
 * mismatches and VLAN tagging, then invoke the client's eec_rx
 * callback.  Returns the callback's abort request.  NOTE(review):
 * switch braces/breaks and some conditional framing lines are elided
 * from this listing.
 */
692 static __checkReturn boolean_t
695 __in efx_qword_t *eqp,
696 __in const efx_ev_callbacks_t *eecp,
703 #if EFSYS_OPT_RX_SCATTER
705 boolean_t jumbo_cont;
706 #endif /* EFSYS_OPT_RX_SCATTER */
711 boolean_t should_abort;
713 EFX_EV_QSTAT_INCR(eep, EV_RX);
715 /* Basic packet information */
716 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
717 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
718 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
719 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
721 #if EFSYS_OPT_RX_SCATTER
722 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
723 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
724 #endif /* EFSYS_OPT_RX_SCATTER */
726 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
728 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
731 * If packet is marked as OK and packet type is TCP/IP or
732 * UDP/IP or other IP, then we can rely on the hardware checksums.
735 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
736 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
738 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
739 flags |= EFX_PKT_IPV6;
741 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
742 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
746 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
747 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
749 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
750 flags |= EFX_PKT_IPV6;
752 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
753 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
757 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
759 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
760 flags = EFX_PKT_IPV6;
762 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
763 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
767 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
768 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
773 EFSYS_ASSERT(B_FALSE);
778 #if EFSYS_OPT_RX_SCATTER
779 /* Report scatter and header/lookahead split buffer flags */
781 flags |= EFX_PKT_START;
783 flags |= EFX_PKT_CONT;
784 #endif /* EFSYS_OPT_RX_SCATTER */
786 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
788 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
790 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
791 uint32_t, size, uint16_t, flags);
797 /* If we're not discarding the packet then it is ok */
798 if (~flags & EFX_DISCARD)
799 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
801 /* Detect multicast packets that didn't match the filter */
802 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
803 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
805 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
806 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
808 EFSYS_PROBE(mcast_mismatch);
809 flags |= EFX_ADDR_MISMATCH;
812 flags |= EFX_PKT_UNICAST;
816 * The packet parser in Siena can abort parsing packets under
817 * certain error conditions, setting the PKT_NOT_PARSED bit
818 * (which clears PKT_OK). If this is set, then don't trust
819 * the PKT_TYPE field.
824 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
826 flags |= EFX_CHECK_VLAN;
/* Trust PKT_TYPE for VLAN detection only when parsing completed. */
829 if (~flags & EFX_CHECK_VLAN) {
832 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
833 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
834 flags |= EFX_PKT_VLAN_TAGGED;
837 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
838 uint32_t, size, uint16_t, flags);
840 EFSYS_ASSERT(eecp->eec_rx != NULL);
841 should_abort = eecp->eec_rx(arg, label, id, size, flags);
843 return (should_abort);
/*
 * Siena TX event handler: for a clean completion (COMP set, no
 * PKT_ERR/PKT_TOO_BIG/WQ_FF_FULL) extract descriptor id and label and
 * invoke the client's eec_tx callback.  Any other combination is
 * counted per-error-bit and probed as a bad event.  NOTE(review): the
 * function name line and final return are elided from this listing.
 */
846 static __checkReturn boolean_t
849 __in efx_qword_t *eqp,
850 __in const efx_ev_callbacks_t *eecp,
855 boolean_t should_abort;
857 EFX_EV_QSTAT_INCR(eep, EV_TX);
859 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
860 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
861 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
862 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
864 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
865 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
867 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
869 EFSYS_ASSERT(eecp->eec_tx != NULL);
870 should_abort = eecp->eec_tx(arg, label, id);
872 return (should_abort);
/* Not a clean completion: classify and count the error bits. */
875 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
876 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
877 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
878 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
880 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
881 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
883 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
884 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
886 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
887 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
889 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
/*
 * Siena global event handler: arguments unused; only counts the event.
 * NOTE(review): the function name line and return are elided from this
 * listing.
 */
893 static __checkReturn boolean_t
896 __in efx_qword_t *eqp,
897 __in const efx_ev_callbacks_t *eecp,
900 _NOTE(ARGUNUSED(eqp, eecp, arg))
902 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
/*
 * Siena driver event handler: dispatch on FSF_AZ_DRIVER_EV_SUBCODE and
 * forward each subtype (TX/RX flush done, EVQ init done, SRM update,
 * wake-up, timer, RX/TX descriptor errors) to the corresponding client
 * callback, asserting the callback is installed.  Descriptor errors are
 * reported through eec_exception.  Returns the callback's abort
 * request.  NOTE(review): case braces/breaks and the default case are
 * elided from this listing.
 */
907 static __checkReturn boolean_t
910 __in efx_qword_t *eqp,
911 __in const efx_ev_callbacks_t *eecp,
914 boolean_t should_abort;
916 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
917 should_abort = B_FALSE;
919 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
920 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
923 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
925 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
927 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
929 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
930 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
934 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
938 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
939 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
941 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
942 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
/* Distinguish a failed RX flush from a successful one. */
945 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
947 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
949 should_abort = eecp->eec_rxq_flush_failed(arg,
952 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
954 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
956 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
961 case FSE_AZ_EVQ_INIT_DONE_EV:
962 EFSYS_ASSERT(eecp->eec_initialized != NULL);
963 should_abort = eecp->eec_initialized(arg);
967 case FSE_AZ_EVQ_NOT_EN_EV:
968 EFSYS_PROBE(evq_not_en);
971 case FSE_AZ_SRM_UPD_DONE_EV: {
974 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
976 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
978 EFSYS_ASSERT(eecp->eec_sram != NULL);
979 should_abort = eecp->eec_sram(arg, code);
983 case FSE_AZ_WAKE_UP_EV: {
986 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
988 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
989 should_abort = eecp->eec_wake_up(arg, id);
993 case FSE_AZ_TX_PKT_NON_TCP_UDP:
994 EFSYS_PROBE(tx_pkt_non_tcp_udp);
997 case FSE_AZ_TIMER_EV: {
1000 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1002 EFSYS_ASSERT(eecp->eec_timer != NULL);
1003 should_abort = eecp->eec_timer(arg, id);
1007 case FSE_AZ_RX_DSC_ERROR_EV:
1008 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
1010 EFSYS_PROBE(rx_dsc_error);
1012 EFSYS_ASSERT(eecp->eec_exception != NULL);
1013 should_abort = eecp->eec_exception(arg,
1014 EFX_EXCEPTION_RX_DSC_ERROR, 0);
1018 case FSE_AZ_TX_DSC_ERROR_EV:
1019 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
1021 EFSYS_PROBE(tx_dsc_error);
1023 EFSYS_ASSERT(eecp->eec_exception != NULL);
1024 should_abort = eecp->eec_exception(arg,
1025 EFX_EXCEPTION_TX_DSC_ERROR, 0);
1033 return (should_abort);
/*
 * Siena driver-generated (software) event handler: the payload must fit
 * in 16 bits; larger values are probed as bad events.  Valid payloads
 * are passed to the client's eec_software callback.  NOTE(review): the
 * function name line and the bad-event early return are elided from
 * this listing.
 */
1036 static __checkReturn boolean_t
1038 __in efx_evq_t *eep,
1039 __in efx_qword_t *eqp,
1040 __in const efx_ev_callbacks_t *eecp,
1044 boolean_t should_abort;
1046 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1048 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
1049 if (data >= ((uint32_t)1 << 16)) {
1050 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1051 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1052 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1056 EFSYS_ASSERT(eecp->eec_software != NULL);
1057 should_abort = eecp->eec_software(arg, (uint16_t)data);
1059 return (should_abort);
/*
 * Siena MCDI event handler: dispatch on MCDI_EVENT_CODE.  Command
 * completions and firmware assert/reboot feed the MCDI layer
 * (efx_mcdi_ev_cpl / efx_mcdi_ev_death); link changes, sensor events,
 * MAC-stats DMA and firmware alerts are forwarded to the corresponding
 * client callbacks.  Only valid on the Siena family (asserted).
 * Returns the callback's abort request.  NOTE(review): switch
 * braces/breaks, the default case and some conditional framing lines
 * are elided from this listing.
 */
1064 static __checkReturn boolean_t
1066 __in efx_evq_t *eep,
1067 __in efx_qword_t *eqp,
1068 __in const efx_ev_callbacks_t *eecp,
1071 efx_nic_t *enp = eep->ee_enp;
1073 boolean_t should_abort = B_FALSE;
1075 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
1077 if (enp->en_family != EFX_FAMILY_SIENA)
1080 EFSYS_ASSERT(eecp->eec_link_change != NULL);
1081 EFSYS_ASSERT(eecp->eec_exception != NULL);
1082 #if EFSYS_OPT_MON_STATS
1083 EFSYS_ASSERT(eecp->eec_monitor != NULL);
1086 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
1088 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
1090 case MCDI_EVENT_CODE_BADSSERT:
1091 efx_mcdi_ev_death(enp, EINTR);
1094 case MCDI_EVENT_CODE_CMDDONE:
1095 efx_mcdi_ev_cpl(enp,
1096 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1097 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1098 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1101 case MCDI_EVENT_CODE_LINKCHANGE: {
1102 efx_link_mode_t link_mode;
1104 siena_phy_link_ev(enp, eqp, &link_mode);
1105 should_abort = eecp->eec_link_change(arg, link_mode);
1108 case MCDI_EVENT_CODE_SENSOREVT: {
1109 #if EFSYS_OPT_MON_STATS
1111 efx_mon_stat_value_t value;
/* Unknown sensors become exceptions; wrong-port events are ignored. */
1114 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
1115 should_abort = eecp->eec_monitor(arg, id, value);
1116 else if (rc == ENOTSUP) {
1117 should_abort = eecp->eec_exception(arg,
1118 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1119 MCDI_EV_FIELD(eqp, DATA));
1121 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1123 should_abort = B_FALSE;
1127 case MCDI_EVENT_CODE_SCHEDERR:
1128 /* Informational only */
1131 case MCDI_EVENT_CODE_REBOOT:
1132 efx_mcdi_ev_death(enp, EIO);
1135 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1136 #if EFSYS_OPT_MAC_STATS
1137 if (eecp->eec_mac_stats != NULL) {
1138 eecp->eec_mac_stats(arg,
1139 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1144 case MCDI_EVENT_CODE_FWALERT: {
1145 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1147 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1148 should_abort = eecp->eec_exception(arg,
1149 EFX_EXCEPTION_FWALERT_SRAM,
1150 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1152 should_abort = eecp->eec_exception(arg,
1153 EFX_EXCEPTION_UNKNOWN_FWALERT,
1154 MCDI_EV_FIELD(eqp, DATA));
1159 EFSYS_PROBE1(mc_pcol_error, int, code);
1164 return (should_abort);
1167 #endif /* EFSYS_OPT_MCDI */
/*
 * Siena eevo_qprime: write the masked read pointer into the queue's
 * FR_AZ_EVQ_RPTR_REG table entry to re-arm the event queue.
 * NOTE(review): the function name line and return are elided from this
 * listing.
 */
1169 static __checkReturn efx_rc_t
1171 __in efx_evq_t *eep,
1172 __in unsigned int count)
1174 efx_nic_t *enp = eep->ee_enp;
1178 rptr = count & eep->ee_mask;
1180 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1182 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
/*
 * Siena eevo_qpost: build a DRV_GEN event qword carrying 'data' and
 * inject it into the queue by writing FR_AZ_DRV_EV_REG.  NOTE(review):
 * the siena_ev_qpost() name line is elided from this listing.
 */
1190 __in efx_evq_t *eep,
1193 efx_nic_t *enp = eep->ee_enp;
1197 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1198 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1200 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1201 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1202 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1204 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
/*
 * Siena eevo_qmoderate: validate 'us' against enc_evq_timer_max_us,
 * convert it to ticks (zero disables the timer) and write the timer
 * command register for this queue.  The write for queue 0 takes the
 * locked path.  NOTE(review): the function name line, the us == 0
 * branch framing, and fail labels are elided from this listing.
 */
1207 static __checkReturn efx_rc_t
1209 __in efx_evq_t *eep,
1210 __in unsigned int us)
1212 efx_nic_t *enp = eep->ee_enp;
1213 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1214 unsigned int locked;
1218 if (us > encp->enc_evq_timer_max_us) {
1223 /* If the value is zero then disable the timer */
1225 EFX_POPULATE_DWORD_2(dword,
1226 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1227 FRF_CZ_TC_TIMER_VAL, 0);
1231 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
1234 EFSYS_ASSERT(ticks > 0);
/* Hardware holds off for (TC_TIMER_VAL + 1) ticks. */
1235 EFX_POPULATE_DWORD_2(dword,
1236 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1237 FRF_CZ_TC_TIMER_VAL, ticks - 1);
1240 locked = (eep->ee_index == 0) ? 1 : 0;
1242 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1243 eep->ee_index, &dword, locked);
1250 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Siena eevo_qcreate: validate ndescs (power of two within
 * [EFX_EVQ_MINNEVS, EFX_EVQ_MAXNEVS]), the queue index and the buffer
 * table range; install the siena_ev_* handler table on the queue;
 * program the timer and EVQ pointer table entries; and apply the
 * initial moderation via siena_ev_qmoderate().  NOTE(review): rc
 * assignments, fail labels and the return are elided from this
 * listing.
 */
1255 static __checkReturn efx_rc_t
1257 __in efx_nic_t *enp,
1258 __in unsigned int index,
1259 __in efsys_mem_t *esmp,
1263 __in uint32_t flags,
1264 __in efx_evq_t *eep)
1266 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1270 boolean_t notify_mode;
1272 _NOTE(ARGUNUSED(esmp))
1274 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
1275 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
1277 if (!ISP2(ndescs) ||
1278 (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
1282 if (index >= encp->enc_evq_limit) {
1286 #if EFSYS_OPT_RX_SCALE
1287 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1288 index >= EFX_MAXRSS_LEGACY) {
/* Encode the queue size as log2(ndescs / EFX_EVQ_MINNEVS). */
1293 for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
1295 if ((1 << size) == (int)(ndescs / EFX_EVQ_MINNEVS))
1297 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1302 /* Set up the handler table */
1303 eep->ee_rx = siena_ev_rx;
1304 eep->ee_tx = siena_ev_tx;
1305 eep->ee_driver = siena_ev_driver;
1306 eep->ee_global = siena_ev_global;
1307 eep->ee_drv_gen = siena_ev_drv_gen;
1309 eep->ee_mcdi = siena_ev_mcdi;
1310 #endif /* EFSYS_OPT_MCDI */
1312 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
1313 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
1315 /* Set up the new event queue */
1316 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
1317 FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
1318 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1319 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1321 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1322 FRF_AZ_EVQ_BUF_BASE_ID, id);
1324 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1326 /* Set initial interrupt moderation */
1327 siena_ev_qmoderate(eep, us);
1333 #if EFSYS_OPT_RX_SCALE
1340 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1345 #endif /* EFSYS_OPT_SIENA */
1347 #if EFSYS_OPT_QSTATS

/*
 * Machine-generated table of per-queue statistic names, indexed by the
 * EV_* stat id, plus the accessor efx_ev_qstat_name() which bounds-
 * checks 'id' (assert only) and returns the name.  Do not hand-edit the
 * table between the MKCONFIG markers.  NOTE(review): most table entries
 * and the accessor's signature framing are elided from this listing.
 */
1349 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
1350 static const char * const __efx_ev_qstat_name[] = {
1357 "rx_buf_owner_id_err",
1358 "rx_ipv4_hdr_chksum_err",
1359 "rx_tcp_udp_chksum_err",
1363 "rx_mcast_hash_match",
1380 "driver_srm_upd_done",
1381 "driver_tx_descq_fls_done",
1382 "driver_rx_descq_fls_done",
1383 "driver_rx_descq_fls_failed",
1384 "driver_rx_dsc_error",
1385 "driver_tx_dsc_error",
1389 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
1393 __in efx_nic_t *enp,
1394 __in unsigned int id)
1396 _NOTE(ARGUNUSED(enp))
1398 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1399 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1401 return (__efx_ev_qstat_name[id]);
1403 #endif /* EFSYS_OPT_NAMES */
1404 #endif /* EFSYS_OPT_QSTATS */
1408 #if EFSYS_OPT_QSTATS
/*
 * Siena eevo_qstats_update: add each accumulated per-queue counter into
 * the caller's stat array and reset the queue-local counter to zero
 * (read-and-clear semantics).
 */
1410 siena_ev_qstats_update(
1411 __in efx_evq_t *eep,
1412 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1416 for (id = 0; id < EV_NQSTATS; id++) {
1417 efsys_stat_t *essp = &stat[id];
1419 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1420 eep->ee_stat[id] = 0;
1423 #endif /* EFSYS_OPT_QSTATS */
/*
 * Siena eevo_qdestroy: purge the hardware queue by zeroing its
 * FR_AZ_EVQ_PTR_TBL and FR_AZ_TIMER_TBL entries.  NOTE(review): the
 * siena_ev_qdestroy() name line is elided from this listing.
 */
1427 __in efx_evq_t *eep)
1429 efx_nic_t *enp = eep->ee_enp;
1432 /* Purge event queue */
1433 EFX_ZERO_OWORD(oword);
1435 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1436 eep->ee_index, &oword, B_TRUE);
1438 EFX_ZERO_OWORD(oword);
1439 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
/*
 * Siena eevo_fini: no per-family teardown required; the argument is
 * deliberately unused.  NOTE(review): the siena_ev_fini() name line is
 * elided from this listing.
 */
1444 __in efx_nic_t *enp)
1446 _NOTE(ARGUNUSED(enp))
1449 #endif /* EFSYS_OPT_SIENA */