1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2007-2018 Solarflare Communications Inc.
/*
 * Bump a per-queue software event statistic counter.
 * NOTE(review): lines are elided in this excerpt; the second, empty
 * definition below is presumably the !EFSYS_OPT_QSTATS variant under an
 * elided #else -- confirm against the full file.
 */
14 #define EFX_EV_QSTAT_INCR(_eep, _stat) \
16 (_eep)->ee_stat[_stat]++; \
17 _NOTE(CONSTANTCONDITION) \
/* Statistics disabled: macro compiles away to nothing. */
20 #define EFX_EV_QSTAT_INCR(_eep, _stat)
/*
 * An event is present only if neither dword of the qword is all-ones.
 * (Queue memory is reset to all-ones between events -- see the
 * EFX_SET_QWORD clearing loop in the poll routine below.)
 */
23 #define EFX_EV_PRESENT(_qword) \
24 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
25 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
/*
 * Forward declarations of the Siena implementations of the eevo_*
 * method table entries (see __efx_ev_siena_ops below).
 * NOTE(review): the function-name lines of most prototypes are elided
 * in this excerpt; only parameter fragments remain visible.
 */
31 static __checkReturn efx_rc_t
39 static __checkReturn efx_rc_t
42 __in unsigned int index,
43 __in efsys_mem_t *esmp,
54 static __checkReturn efx_rc_t
57 __in unsigned int count);
64 static __checkReturn efx_rc_t
67 __in unsigned int us);
71 siena_ev_qstats_update(
73 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
77 #endif /* EFSYS_OPT_SIENA */
/*
 * Method table binding the generic efx_ev_* API to the Siena-family
 * implementations.  Selected by efx_ev_init() on EFX_FAMILY_SIENA.
 */
80 static const efx_ev_ops_t __efx_ev_siena_ops = {
81 siena_ev_init, /* eevo_init */
82 siena_ev_fini, /* eevo_fini */
83 siena_ev_qcreate, /* eevo_qcreate */
84 siena_ev_qdestroy, /* eevo_qdestroy */
85 siena_ev_qprime, /* eevo_qprime */
86 siena_ev_qpost, /* eevo_qpost */
87 siena_ev_qmoderate, /* eevo_qmoderate */
89 siena_ev_qstats_update, /* eevo_qstats_update */
92 #endif /* EFSYS_OPT_SIENA */
/*
 * Method table for the EF10 architecture families (Huntington, Medford
 * and Medford2), which share one set of ef10_ev_* implementations.
 */
94 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
95 static const efx_ev_ops_t __efx_ev_ef10_ops = {
96 ef10_ev_init, /* eevo_init */
97 ef10_ev_fini, /* eevo_fini */
98 ef10_ev_qcreate, /* eevo_qcreate */
99 ef10_ev_qdestroy, /* eevo_qdestroy */
100 ef10_ev_qprime, /* eevo_qprime */
101 ef10_ev_qpost, /* eevo_qpost */
102 ef10_ev_qmoderate, /* eevo_qmoderate */
104 ef10_ev_qstats_update, /* eevo_qstats_update */
107 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
/*
 * Initialise the event module: select the per-family ops table, run the
 * family-specific init, and mark EFX_MOD_EV live.  Requires the INTR
 * module to be initialised first (asserted below); re-initialisation is
 * rejected.  (Function-name line elided in this excerpt.)
 */
110 __checkReturn efx_rc_t
114 const efx_ev_ops_t *eevop;
117 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
118 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
/* Double initialisation is an error. */
120 if (enp->en_mod_flags & EFX_MOD_EV) {
/* Dispatch on controller family; each case is conditionally compiled. */
125 switch (enp->en_family) {
127 case EFX_FAMILY_SIENA:
128 eevop = &__efx_ev_siena_ops;
130 #endif /* EFSYS_OPT_SIENA */
132 #if EFSYS_OPT_HUNTINGTON
133 case EFX_FAMILY_HUNTINGTON:
134 eevop = &__efx_ev_ef10_ops;
136 #endif /* EFSYS_OPT_HUNTINGTON */
138 #if EFSYS_OPT_MEDFORD
139 case EFX_FAMILY_MEDFORD:
140 eevop = &__efx_ev_ef10_ops;
142 #endif /* EFSYS_OPT_MEDFORD */
144 #if EFSYS_OPT_MEDFORD2
145 case EFX_FAMILY_MEDFORD2:
146 eevop = &__efx_ev_ef10_ops;
148 #endif /* EFSYS_OPT_MEDFORD2 */
/* No queues may exist before the module is initialised. */
156 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
158 if ((rc = eevop->eevo_init(enp)) != 0)
/* Publish the ops table and mark the EV module as up. */
161 enp->en_eevop = eevop;
162 enp->en_mod_flags |= EFX_MOD_EV;
/* Failure path: leave the module fully torn down. */
169 EFSYS_PROBE1(fail1, efx_rc_t, rc);
171 enp->en_eevop = NULL;
172 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * Tear down the event module (presumably efx_ev_fini; signature elided).
 * Preconditions asserted: INTR and EV modules up, RX/TX modules already
 * down, and every event queue destroyed.
 */
180 const efx_ev_ops_t *eevop = enp->en_eevop;
182 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
183 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
184 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
185 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
186 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
187 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
189 eevop->eevo_fini(enp);
/* Unpublish the ops table and mark the module down. */
191 enp->en_eevop = NULL;
192 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * Generic event-queue create (presumably efx_ev_qcreate; name line
 * elided).  Validates the notify flags and descriptor count, allocates
 * the efx_evq_t, then delegates to the family eevo_qcreate method.
 */
196 __checkReturn efx_rc_t
199 __in unsigned int index,
200 __in efsys_mem_t *esmp,
205 __deref_out efx_evq_t **eepp)
207 const efx_ev_ops_t *eevop = enp->en_eevop;
209 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
212 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
213 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
215 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
216 enp->en_nic_cfg.enc_evq_limit);
/* Only the INTERRUPT and DISABLED notify modes are accepted. */
218 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
219 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
221 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
/* ndescs must lie within the (power-of-two) configured bounds. */
232 EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs));
233 EFSYS_ASSERT(ISP2(encp->enc_evq_min_nevs));
236 ndescs < encp->enc_evq_min_nevs ||
237 ndescs > encp->enc_evq_max_nevs) {
242 /* Allocate an EVQ object */
243 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
249 eep->ee_magic = EFX_EVQ_MAGIC;
251 eep->ee_index = index;
/* ee_mask relies on ndescs being a power of two. */
252 eep->ee_mask = ndescs - 1;
253 eep->ee_flags = flags;
257 * Set outputs before the queue is created because interrupts may be
258 * raised for events immediately after the queue is created, before the
259 * function call below returns. See bug58606.
261 * The eepp pointer passed in by the client must therefore point to data
262 * shared with the client's event processing context.
267 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
/* Failure path: release the EVQ object allocated above. */
278 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
286 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy an event queue (presumably efx_ev_qdestroy; signature elided):
 * delegate to the family method, then free the efx_evq_t.
 */
294 efx_nic_t *enp = eep->ee_enp;
295 const efx_ev_ops_t *eevop = enp->en_eevop;
297 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
299 EFSYS_ASSERT(enp->en_ev_qcount != 0);
302 eevop->eevo_qdestroy(eep);
304 /* Free the EVQ object */
305 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
/*
 * Re-arm (prime) an event queue at read pointer 'count' (presumably
 * efx_ev_qprime; name line elided).  Requires the INTR module to be up;
 * otherwise fails before touching hardware.
 */
308 __checkReturn efx_rc_t
311 __in unsigned int count)
313 efx_nic_t *enp = eep->ee_enp;
314 const efx_ev_ops_t *eevop = enp->en_eevop;
317 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
319 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
324 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
332 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Test whether an event is pending at position 'count' (presumably
 * efx_ev_qpending; name line elided).  Reads the qword at the masked
 * ring offset and applies EFX_EV_PRESENT.
 */
336 __checkReturn boolean_t
339 __in unsigned int count)
344 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
346 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
347 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
349 return (EFX_EV_PRESENT(qword));
/*
 * Prefetch the event ring entry at position 'count' (presumably
 * efx_ev_qprefetch; name line elided).  Only built when prefetch
 * support is enabled.
 */
352 #if EFSYS_OPT_EV_PREFETCH
357 __in unsigned int count)
361 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
363 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
364 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
367 #endif /* EFSYS_OPT_EV_PREFETCH */
/* Events are read from the ring in batches of up to this many qwords. */
369 #define EFX_EV_BATCH 8
/*
 * Poll an event queue (presumably efx_ev_qpoll; name line elided):
 * read events in batches, dispatch each to the per-type handler set up
 * at queue creation, then clear the processed ring entries back to
 * all-ones.  A handler returning B_TRUE aborts processing; the batch
 * counter is "poisoned" so the outer do/while loop terminates.
 */
374 __inout unsigned int *countp,
375 __in const efx_ev_callbacks_t *eecp,
378 efx_qword_t ev[EFX_EV_BATCH];
385 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
386 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
387 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
389 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
390 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
391 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
392 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
393 FSE_AZ_EV_CODE_DRV_GEN_EV);
395 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
396 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
399 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
400 EFSYS_ASSERT(countp != NULL);
401 EFSYS_ASSERT(eecp != NULL);
405 /* Read up until the end of the batch period */
406 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
407 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
408 for (total = 0; total < batch; ++total) {
409 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
/* Stop copying at the first empty (all-ones) entry. */
411 if (!EFX_EV_PRESENT(ev[total]))
414 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
415 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
416 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
418 offset += sizeof (efx_qword_t);
421 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
423 * Prefetch the next batch when we get within PREFETCH_PERIOD
424 * of a completed batch. If the batch is smaller, then prefetch
427 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
428 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
429 #endif /* EFSYS_OPT_EV_PREFETCH */
431 /* Process the batch of events */
432 for (index = 0; index < total; ++index) {
433 boolean_t should_abort;
436 #if EFSYS_OPT_EV_PREFETCH
437 /* Prefetch if we've now reached the batch period */
438 if (total == batch &&
439 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
440 offset = (count + batch) & eep->ee_mask;
441 offset *= sizeof (efx_qword_t);
443 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
445 #endif /* EFSYS_OPT_EV_PREFETCH */
447 EFX_EV_QSTAT_INCR(eep, EV_ALL);
/* Dispatch on event code to the handlers installed by qcreate. */
449 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
451 case FSE_AZ_EV_CODE_RX_EV:
452 should_abort = eep->ee_rx(eep,
453 &(ev[index]), eecp, arg);
455 case FSE_AZ_EV_CODE_TX_EV:
456 should_abort = eep->ee_tx(eep,
457 &(ev[index]), eecp, arg);
459 case FSE_AZ_EV_CODE_DRIVER_EV:
460 should_abort = eep->ee_driver(eep,
461 &(ev[index]), eecp, arg);
463 case FSE_AZ_EV_CODE_DRV_GEN_EV:
464 should_abort = eep->ee_drv_gen(eep,
465 &(ev[index]), eecp, arg);
468 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
469 should_abort = eep->ee_mcdi(eep,
470 &(ev[index]), eecp, arg);
/* ee_global may be NULL; an unhandled global event is reported as bad. */
473 case FSE_AZ_EV_CODE_GLOBAL_EV:
474 if (eep->ee_global) {
475 should_abort = eep->ee_global(eep,
476 &(ev[index]), eecp, arg);
479 /* else fallthrough */
481 EFSYS_PROBE3(bad_event,
482 unsigned int, eep->ee_index,
484 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
486 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
488 EFSYS_ASSERT(eecp->eec_exception != NULL);
489 (void) eecp->eec_exception(arg,
490 EFX_EXCEPTION_EV_ERROR, code);
491 should_abort = B_TRUE;
494 /* Ignore subsequent events */
498 * Poison batch to ensure the outer
499 * loop is broken out of.
501 EFSYS_ASSERT(batch <= EFX_EV_BATCH);
502 batch += (EFX_EV_BATCH << 1);
503 EFSYS_ASSERT(total != batch);
509 * Now that the hardware has most likely moved onto dma'ing
510 * into the next cache line, clear the processed events. Take
511 * care to only clear out events that we've processed
513 EFX_SET_QWORD(ev[0]);
514 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
515 for (index = 0; index < total; ++index) {
516 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
517 offset += sizeof (efx_qword_t);
/* A full batch means more events may be waiting; poll again. */
522 } while (total == batch);
/*
 * Post a software (driver-generated) event to the queue (presumably
 * efx_ev_qpost; signature elided).  Not all families implement
 * eevo_qpost, hence the non-NULL assertion before the call.
 */
532 efx_nic_t *enp = eep->ee_enp;
533 const efx_ev_ops_t *eevop = enp->en_eevop;
535 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
537 EFSYS_ASSERT(eevop != NULL &&
538 eevop->eevo_qpost != NULL);
540 eevop->eevo_qpost(eep, data);
/*
 * efx_ev_usecs_to_ticks(): convert a moderation period in microseconds
 * into hardware timer ticks using the NIC's timer quantum (ns).
 * A non-zero 'us' never yields zero ticks (rounded up to 1).
 * NOTE(review): 'us * 1000' is unsigned arithmetic and would wrap for
 * us > ~4.29e6; callers appear to bound 'us' (see the
 * enc_evq_timer_max_us check in the moderate path) -- confirm.
 */
543 __checkReturn efx_rc_t
544 efx_ev_usecs_to_ticks(
546 __in unsigned int us,
547 __out unsigned int *ticksp)
549 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
552 /* Convert microseconds to a timer tick count */
555 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
556 ticks = 1; /* Never round down to zero */
558 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
/*
 * Set interrupt moderation for a queue (presumably efx_ev_qmoderate;
 * name line elided).  Rejected when the queue was created with
 * notifications disabled, since moderation is meaningless then.
 */
564 __checkReturn efx_rc_t
567 __in unsigned int us)
569 efx_nic_t *enp = eep->ee_enp;
570 const efx_ev_ops_t *eevop = enp->en_eevop;
573 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
575 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
576 EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
581 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
589 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_ev_qstats_update(): fold this queue's accumulated software event
 * statistics into the caller's EV_NQSTATS-sized array via the family
 * method.  Only built when EFSYS_OPT_QSTATS is enabled.
 */
595 efx_ev_qstats_update(
597 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
599 { efx_nic_t *enp = eep->ee_enp;
600 const efx_ev_ops_t *eevop = enp->en_eevop;
602 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
604 eevop->eevo_qstats_update(eep, stat);
607 #endif /* EFSYS_OPT_QSTATS */
/*
 * Siena event-module init (presumably siena_ev_init; name line elided):
 * read-modify-write FR_AZ_DP_CTRL_REG to set the flush event queue id
 * to 0 for receive/transmit queue flush events.
 */
611 static __checkReturn efx_rc_t
618 * Program the event queue for receive and transmit queue
621 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
622 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
623 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
/*
 * siena_ev_rx_not_ok() (name confirmed by the call site in the RX
 * handler): decode the error bits behind a clear PKT_OK indication and
 * adjust *flagsp (EFX_DISCARD, EFX_ADDR_MISMATCH, checksum flags).
 * Returns 'ignore' = B_TRUE when the event should be dropped entirely
 * (scatter "payload queue ran dry" case).
 */
629 static __checkReturn boolean_t
632 __in efx_qword_t *eqp,
635 __inout uint16_t *flagsp)
637 boolean_t ignore = B_FALSE;
639 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
640 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
641 EFSYS_PROBE(tobe_disc);
643 * Assume this is a unicast address mismatch, unless below
644 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
645 * EV_RX_PAUSE_FRM_ERR is set.
647 (*flagsp) |= EFX_ADDR_MISMATCH;
650 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
651 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
652 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
653 (*flagsp) |= EFX_DISCARD;
655 #if EFSYS_OPT_RX_SCATTER
657 * Lookout for payload queue ran dry errors and ignore them.
659 * Sadly for the header/data split cases, the descriptor
660 * pointer in this event refers to the header queue and
661 * therefore cannot be easily detected as duplicate.
662 * So we drop these and rely on the receive processing seeing
663 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
664 * the partially received packet.
666 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
667 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
668 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
670 #endif /* EFSYS_OPT_RX_SCATTER */
/* CRC error: not an address mismatch after all; discard. */
673 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
674 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
675 EFSYS_PROBE(crc_err);
676 (*flagsp) &= ~EFX_ADDR_MISMATCH;
677 (*flagsp) |= EFX_DISCARD;
/* Pause frame: likewise clears the mismatch assumption; discard. */
680 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
681 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
682 EFSYS_PROBE(pause_frm_err);
683 (*flagsp) &= ~EFX_ADDR_MISMATCH;
684 (*flagsp) |= EFX_DISCARD;
687 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
688 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
689 EFSYS_PROBE(owner_id_err);
690 (*flagsp) |= EFX_DISCARD;
/* Checksum failures: strip the corresponding "valid checksum" flags. */
693 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
694 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
695 EFSYS_PROBE(ipv4_err);
696 (*flagsp) &= ~EFX_CKSUM_IPV4;
699 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
700 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
701 EFSYS_PROBE(udp_chk_err);
702 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
705 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
706 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
709 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
710 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
713 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
/*
 * Siena RX completion handler (presumably siena_ev_rx; name line
 * elided).  Decodes the RX event into (label, id, size, flags), folds
 * in error/multicast/VLAN information, and invokes the client's eec_rx
 * callback.  Returns the callback's abort indication.
 */
719 static __checkReturn boolean_t
722 __in efx_qword_t *eqp,
723 __in const efx_ev_callbacks_t *eecp,
730 #if EFSYS_OPT_RX_SCATTER
732 boolean_t jumbo_cont;
733 #endif /* EFSYS_OPT_RX_SCATTER */
738 boolean_t should_abort;
740 EFX_EV_QSTAT_INCR(eep, EV_RX);
742 /* Basic packet information */
743 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
744 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
745 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
746 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
748 #if EFSYS_OPT_RX_SCATTER
749 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
750 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
751 #endif /* EFSYS_OPT_RX_SCATTER */
753 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
755 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
758 * If packet is marked as OK and packet type is TCP/IP or
759 * UDP/IP or other IP, then we can rely on the hardware checksums.
/* switch on hdr_type (switch statement elided in this excerpt). */
762 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
763 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
765 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
766 flags |= EFX_PKT_IPV6;
768 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
769 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
773 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
774 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
776 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
777 flags |= EFX_PKT_IPV6;
779 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
780 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
784 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
786 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
787 flags = EFX_PKT_IPV6;
789 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
790 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
794 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
795 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
800 EFSYS_ASSERT(B_FALSE);
805 #if EFSYS_OPT_RX_SCATTER
806 /* Report scatter and header/lookahead split buffer flags */
808 flags |= EFX_PKT_START;
810 flags |= EFX_PKT_CONT;
811 #endif /* EFSYS_OPT_RX_SCATTER */
813 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
815 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
817 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
818 uint32_t, size, uint16_t, flags);
824 /* If we're not discarding the packet then it is ok */
825 if (~flags & EFX_DISCARD)
826 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
828 /* Detect multicast packets that didn't match the filter */
829 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
830 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
832 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
833 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
835 EFSYS_PROBE(mcast_mismatch);
836 flags |= EFX_ADDR_MISMATCH;
839 flags |= EFX_PKT_UNICAST;
843 * The packet parser in Siena can abort parsing packets under
844 * certain error conditions, setting the PKT_NOT_PARSED bit
845 * (which clears PKT_OK). If this is set, then don't trust
846 * the PKT_TYPE field.
851 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
853 flags |= EFX_CHECK_VLAN;
/* PKT_TYPE is only trustworthy when EFX_CHECK_VLAN was not set. */
856 if (~flags & EFX_CHECK_VLAN) {
859 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
860 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
861 flags |= EFX_PKT_VLAN_TAGGED;
864 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
865 uint32_t, size, uint16_t, flags);
867 EFSYS_ASSERT(eecp->eec_rx != NULL);
868 should_abort = eecp->eec_rx(arg, label, id, size, flags);
870 return (should_abort);
/*
 * Siena TX completion handler (presumably siena_ev_tx; name line
 * elided).  Normal completions (COMP set, no error bits) invoke the
 * client's eec_tx callback; anything else is counted/probed as an
 * error or unexpected event.
 */
873 static __checkReturn boolean_t
876 __in efx_qword_t *eqp,
877 __in const efx_ev_callbacks_t *eecp,
882 boolean_t should_abort;
884 EFX_EV_QSTAT_INCR(eep, EV_TX);
/* Fast path: clean completion with no error indications. */
886 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
887 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
888 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
889 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
891 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
892 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
894 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
896 EFSYS_ASSERT(eecp->eec_tx != NULL);
897 should_abort = eecp->eec_tx(arg, label, id);
899 return (should_abort);
/* Error paths: probe a completion with error bits and count each bit. */
902 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
903 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
904 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
905 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
907 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
908 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
910 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
911 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
913 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
914 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
916 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
/*
 * Siena global-event handler (presumably siena_ev_global; name line
 * elided).  Only counts the event; the payload and callbacks are
 * unused (see _NOTE(ARGUNUSED)).
 */
920 static __checkReturn boolean_t
923 __in efx_qword_t *eqp,
924 __in const efx_ev_callbacks_t *eecp,
927 _NOTE(ARGUNUSED(eqp, eecp, arg))
929 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
/*
 * Siena driver-event handler (presumably siena_ev_driver; name line
 * elided).  Demultiplexes on FSF_AZ_DRIVER_EV_SUBCODE and forwards each
 * subtype (flush done, init done, SRM update, wake-up, timer, descriptor
 * errors, ...) to the matching client callback.
 */
934 static __checkReturn boolean_t
937 __in efx_qword_t *eqp,
938 __in const efx_ev_callbacks_t *eecp,
941 boolean_t should_abort;
943 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
944 should_abort = B_FALSE;
946 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
947 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
950 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
952 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
954 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
956 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
957 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
961 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
/* RX flush carries its own queue-id and a pass/fail indication. */
965 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
966 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
968 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
969 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
972 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
974 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
976 should_abort = eecp->eec_rxq_flush_failed(arg,
979 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
981 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
983 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
988 case FSE_AZ_EVQ_INIT_DONE_EV:
989 EFSYS_ASSERT(eecp->eec_initialized != NULL);
990 should_abort = eecp->eec_initialized(arg);
994 case FSE_AZ_EVQ_NOT_EN_EV:
995 EFSYS_PROBE(evq_not_en);
998 case FSE_AZ_SRM_UPD_DONE_EV: {
1001 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
1003 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1005 EFSYS_ASSERT(eecp->eec_sram != NULL);
1006 should_abort = eecp->eec_sram(arg, code);
1010 case FSE_AZ_WAKE_UP_EV: {
1013 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1015 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
1016 should_abort = eecp->eec_wake_up(arg, id);
1020 case FSE_AZ_TX_PKT_NON_TCP_UDP:
1021 EFSYS_PROBE(tx_pkt_non_tcp_udp);
1024 case FSE_AZ_TIMER_EV: {
1027 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1029 EFSYS_ASSERT(eecp->eec_timer != NULL);
1030 should_abort = eecp->eec_timer(arg, id);
/* Descriptor errors are escalated through the exception callback. */
1034 case FSE_AZ_RX_DSC_ERROR_EV:
1035 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
1037 EFSYS_PROBE(rx_dsc_error);
1039 EFSYS_ASSERT(eecp->eec_exception != NULL);
1040 should_abort = eecp->eec_exception(arg,
1041 EFX_EXCEPTION_RX_DSC_ERROR, 0);
1045 case FSE_AZ_TX_DSC_ERROR_EV:
1046 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
1048 EFSYS_PROBE(tx_dsc_error);
1050 EFSYS_ASSERT(eecp->eec_exception != NULL);
1051 should_abort = eecp->eec_exception(arg,
1052 EFX_EXCEPTION_TX_DSC_ERROR, 0);
1060 return (should_abort);
/*
 * Siena driver-generated (software) event handler (presumably
 * siena_ev_drv_gen; name line elided).  Only 16-bit payloads are
 * valid; larger values are reported as bad events.
 */
1063 static __checkReturn boolean_t
1065 __in efx_evq_t *eep,
1066 __in efx_qword_t *eqp,
1067 __in const efx_ev_callbacks_t *eecp,
1071 boolean_t should_abort;
1073 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1075 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
1076 if (data >= ((uint32_t)1 << 16)) {
1077 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1078 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1079 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1083 EFSYS_ASSERT(eecp->eec_software != NULL);
1084 should_abort = eecp->eec_software(arg, (uint16_t)data);
1086 return (should_abort);
/*
 * Siena MCDI event handler (presumably siena_ev_mcdi; name line
 * elided).  Demultiplexes management-controller events: command
 * completion, assertion/reboot (MC "death"), link change, sensor,
 * MAC-stats DMA and firmware alerts.
 */
1091 static __checkReturn boolean_t
1093 __in efx_evq_t *eep,
1094 __in efx_qword_t *eqp,
1095 __in const efx_ev_callbacks_t *eecp,
1098 efx_nic_t *enp = eep->ee_enp;
1100 boolean_t should_abort = B_FALSE;
/* Defensive: handler must only run on Siena parts. */
1102 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
1104 if (enp->en_family != EFX_FAMILY_SIENA)
1107 EFSYS_ASSERT(eecp->eec_link_change != NULL);
1108 EFSYS_ASSERT(eecp->eec_exception != NULL);
1109 #if EFSYS_OPT_MON_STATS
1110 EFSYS_ASSERT(eecp->eec_monitor != NULL);
1113 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
1115 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
/* MC assertion failure: mark the MCDI channel dead with EINTR. */
1117 case MCDI_EVENT_CODE_BADSSERT:
1118 efx_mcdi_ev_death(enp, EINTR);
1121 case MCDI_EVENT_CODE_CMDDONE:
1122 efx_mcdi_ev_cpl(enp,
1123 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1124 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1125 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1128 case MCDI_EVENT_CODE_LINKCHANGE: {
1129 efx_link_mode_t link_mode;
1131 siena_phy_link_ev(enp, eqp, &link_mode);
1132 should_abort = eecp->eec_link_change(arg, link_mode);
1135 case MCDI_EVENT_CODE_SENSOREVT: {
1136 #if EFSYS_OPT_MON_STATS
1138 efx_mon_stat_value_t value;
1141 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
1142 should_abort = eecp->eec_monitor(arg, id, value);
1143 else if (rc == ENOTSUP) {
1144 should_abort = eecp->eec_exception(arg,
1145 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1146 MCDI_EV_FIELD(eqp, DATA));
1148 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1150 should_abort = B_FALSE;
1154 case MCDI_EVENT_CODE_SCHEDERR:
1155 /* Informational only */
/* MC reboot: mark the MCDI channel dead with EIO. */
1158 case MCDI_EVENT_CODE_REBOOT:
1159 efx_mcdi_ev_death(enp, EIO);
1162 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1163 #if EFSYS_OPT_MAC_STATS
1164 if (eecp->eec_mac_stats != NULL) {
1165 eecp->eec_mac_stats(arg,
1166 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1171 case MCDI_EVENT_CODE_FWALERT: {
1172 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1174 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1175 should_abort = eecp->eec_exception(arg,
1176 EFX_EXCEPTION_FWALERT_SRAM,
1177 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1179 should_abort = eecp->eec_exception(arg,
1180 EFX_EXCEPTION_UNKNOWN_FWALERT,
1181 MCDI_EV_FIELD(eqp, DATA));
1186 EFSYS_PROBE1(mc_pcol_error, int, code);
1191 return (should_abort);
1194 #endif /* EFSYS_OPT_MCDI */
/*
 * Siena queue prime (presumably siena_ev_qprime; name line elided):
 * write the masked read pointer into this queue's entry of the
 * FR_AZ_EVQ_RPTR_REG table to re-arm event delivery.
 */
1196 static __checkReturn efx_rc_t
1198 __in efx_evq_t *eep,
1199 __in unsigned int count)
1201 efx_nic_t *enp = eep->ee_enp;
1205 rptr = count & eep->ee_mask;
1207 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1209 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
/*
 * Siena software event post (presumably siena_ev_qpost; name line
 * elided): build a DRV_GEN event qword carrying 'data' and inject it
 * via the FR_AZ_DRV_EV_REG register targeted at this queue.
 */
1217 __in efx_evq_t *eep,
1220 efx_nic_t *enp = eep->ee_enp;
1224 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1225 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1227 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1228 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1229 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1231 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
/*
 * Siena moderation (presumably siena_ev_qmoderate; name line elided):
 * bound-check 'us', convert to ticks, and program the per-queue timer
 * command register (mode DIS when us == 0, INT_HLDOFF otherwise).
 */
1234 static __checkReturn efx_rc_t
1236 __in efx_evq_t *eep,
1237 __in unsigned int us)
1239 efx_nic_t *enp = eep->ee_enp;
1240 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1241 unsigned int locked;
1245 if (us > encp->enc_evq_timer_max_us) {
1250 /* If the value is zero then disable the timer */
1252 EFX_POPULATE_DWORD_2(dword,
1253 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1254 FRF_CZ_TC_TIMER_VAL, 0);
1258 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
/* usecs_to_ticks never returns 0 for non-zero us, so ticks - 1 is safe. */
1261 EFSYS_ASSERT(ticks > 0);
1262 EFX_POPULATE_DWORD_2(dword,
1263 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1264 FRF_CZ_TC_TIMER_VAL, ticks - 1);
/* Only queue 0's timer register page requires the lock. */
1267 locked = (eep->ee_index == 0) ? 1 : 0;
1269 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1270 eep->ee_index, &dword, locked);
1277 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Siena queue create (presumably siena_ev_qcreate; name line elided):
 * validate index/size/buffer-table id, install the per-type event
 * handlers on the efx_evq_t, program the timer and EVQ pointer tables,
 * and apply the initial moderation setting.
 */
1282 static __checkReturn efx_rc_t
1284 __in efx_nic_t *enp,
1285 __in unsigned int index,
1286 __in efsys_mem_t *esmp,
1290 __in uint32_t flags,
1291 __in efx_evq_t *eep)
1293 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1297 boolean_t notify_mode;
1299 _NOTE(ARGUNUSED(esmp))
1301 if (index >= encp->enc_evq_limit) {
1305 #if EFSYS_OPT_RX_SCALE
/* Legacy line interrupts can address only EFX_MAXRSS_LEGACY queues. */
1306 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1307 index >= EFX_MAXRSS_LEGACY) {
/* Derive the hardware EVQ_SIZE encoding from ndescs (loop partly elided). */
1313 (1U << size) <= encp->enc_evq_max_nevs / encp->enc_evq_min_nevs;
1315 if ((1U << size) == (uint32_t)ndescs / encp->enc_evq_min_nevs)
1317 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1322 /* Set up the handler table */
1323 eep->ee_rx = siena_ev_rx;
1324 eep->ee_tx = siena_ev_tx;
1325 eep->ee_driver = siena_ev_driver;
1326 eep->ee_global = siena_ev_global;
1327 eep->ee_drv_gen = siena_ev_drv_gen;
1329 eep->ee_mcdi = siena_ev_mcdi;
1330 #endif /* EFSYS_OPT_MCDI */
1332 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
1333 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
1335 /* Set up the new event queue */
1336 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
1337 FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
1338 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1339 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1341 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1342 FRF_AZ_EVQ_BUF_BASE_ID, id);
1344 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1346 /* Set initial interrupt moderation */
1347 siena_ev_qmoderate(eep, us);
1353 #if EFSYS_OPT_RX_SCALE
1358 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1363 #endif /* EFSYS_OPT_SIENA */
1365 #if EFSYS_OPT_QSTATS
/*
 * Generated table of event-statistic names, indexed by the EV_* ids
 * used with EFX_EV_QSTAT_INCR.  Do not hand-edit the MKCONFIG block
 * (many entries are elided in this excerpt).
 */
1367 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
1368 static const char * const __efx_ev_qstat_name[] = {
1375 "rx_buf_owner_id_err",
1376 "rx_ipv4_hdr_chksum_err",
1377 "rx_tcp_udp_chksum_err",
1381 "rx_mcast_hash_match",
1398 "driver_srm_upd_done",
1399 "driver_tx_descq_fls_done",
1400 "driver_rx_descq_fls_done",
1401 "driver_rx_descq_fls_failed",
1402 "driver_rx_dsc_error",
1403 "driver_tx_dsc_error",
1407 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
/*
 * Look up a statistic's printable name by id (presumably
 * efx_ev_qstat_name; name line elided).  'id' must be < EV_NQSTATS.
 */
1411 __in efx_nic_t *enp,
1412 __in unsigned int id)
1414 _NOTE(ARGUNUSED(enp))
1416 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1417 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1419 return (__efx_ev_qstat_name[id]);
1421 #endif /* EFSYS_OPT_NAMES */
1422 #endif /* EFSYS_OPT_QSTATS */
1426 #if EFSYS_OPT_QSTATS
/*
 * siena_ev_qstats_update(): fold each per-queue counter into the
 * caller's stat array, then zero the queue's counter so the next
 * update reports only the delta.
 */
1428 siena_ev_qstats_update(
1429 __in efx_evq_t *eep,
1430 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1434 for (id = 0; id < EV_NQSTATS; id++) {
1435 efsys_stat_t *essp = &stat[id];
1437 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1438 eep->ee_stat[id] = 0;
1441 #endif /* EFSYS_OPT_QSTATS */
/*
 * Siena queue destroy (presumably siena_ev_qdestroy; name line elided):
 * zero this queue's EVQ pointer-table and timer-table entries to purge
 * the hardware queue state.
 */
1445 __in efx_evq_t *eep)
1447 efx_nic_t *enp = eep->ee_enp;
1450 /* Purge event queue */
1451 EFX_ZERO_OWORD(oword);
1453 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1454 eep->ee_index, &oword, B_TRUE);
1456 EFX_ZERO_OWORD(oword);
1457 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
/*
 * Siena event-module fini (presumably siena_ev_fini; name line elided):
 * no per-family teardown is required, hence ARGUNUSED.
 */
1462 __in efx_nic_t *enp)
1464 _NOTE(ARGUNUSED(enp))
1467 #endif /* EFSYS_OPT_SIENA */