/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * The referenced event queue may even be unallocated.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)
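
/*
 * Illustrative sketch (not part of the driver): a client would typically
 * create the always-interrupting queue with interrupt notification, and may
 * create further queues without it. The names follow the libefx client API;
 * the buffer and size arguments below are placeholders.
 *
 *	rc = efx_ev_qcreate(enp, 0, esmp0, nevs, 0, us,
 *	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT, &eep0);
 *	rc = efx_ev_qcreate(enp, 1, esmp1, nevs, 0, us,
 *	    EFX_EVQ_FLAGS_NOTIFY_DISABLED, &eep1);
 */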

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
	efx_rc_t rc;
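
	/*
	 * The same buffer backs both the MCDI request and its response,
	 * so it is sized for the larger of the two layouts.
	 */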
	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut-through is enabled no RX batching will
	 * occur.
	 *
	 * So always enable RX and TX event batching, and enable event
	 * cut-through if we want low latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}

	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);
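
	/*
	 * Each array entry is a 64-bit qword holding the bus address of one
	 * event queue buffer page, written as low and high 32-bit halves.
	 */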
	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}

	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
	    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable RX
		 * and event cut-through and to disable RX batching. If the
		 * event queue type in flags is auto, we let the firmware
		 * decide the settings to use. If the adapter has a low
		 * latency license, it will choose the best settings for low
		 * latency, otherwise it will choose the best settings for
		 * throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If the event queue type in flags is auto, we favour
		 * throughput if the adapter is running virtualization
		 * supporting firmware (i.e. the full featured firmware
		 * variant) and latency otherwise. The Ethernet Virtual
		 * Bridging capability is used to make this decision. (Note
		 * though that the low latency firmware variant is also best
		 * for throughput, and the corresponding type should be
		 * specified to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
		rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
		    low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

	(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
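
		/*
		 * The workaround writes the read pointer through the
		 * indirect register in two halves: the high-order bits
		 * first, then the low-order bits.
		 */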
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;
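
	/*
	 * Construct a driver-generated event carrying the caller's 16-bit
	 * value and ask the MC to post it to this function's event queue.
	 */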
	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
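		/*
		 * Bug61265 workaround: set the EVQ timer via MCDI (which
		 * takes the period in nanoseconds) instead of writing the
		 * timer registers directly.
		 */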
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			EFX_POPULATE_DWORD_2(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */
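
/*
 * Illustrative sketch (not part of the driver): a client built with
 * EFSYS_OPT_QSTATS might periodically fold the per-queue event counters
 * into its own accumulators; ev_stats below is a hypothetical array.
 *
 *	efsys_stat_t ev_stats[EV_NQSTATS];
 *
 *	ef10_ev_qstats_update(eep, ev_stats);
 */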

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is
		 * configured for 1500 MTU but connected to a port set to
		 * 9000 MTU.
		 *
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington. Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
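	/*
	 * The event carries only the low-order bits of the next read
	 * pointer, so the descriptor count is recovered by modular
	 * subtraction; e.g. with 8 low-order bits, a wrap from 0xfe to
	 * 0x02 yields (0x02 - 0xfe) & 0xff = 4 descriptors.
	 */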
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
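	/*
	 * ef10_ev_qpost() can post only 16 bits of data, so a wider value
	 * indicates an event that this driver could not have generated.
	 */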
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization
		 * request has been processed. If the request was authorized
		 * then the function can now re-send the original MCDI
		 * request. See SF-113652-SW "SR-IOV Proxied Network Access
		 * Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_SENSOREVT,
			    MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_FWALERT_SRAM,
			    MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_FWALERT,
			    MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR
		 * event. This may be followed by TX completions (which we
		 * discard), and then finally by a TX_FLUSH event. Firmware
		 * destroys the TXQ automatically after sending the TX_FLUSH
		 * event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER
		 * set). We want to wait for all completions, so ignore the
		 * events with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we
		 * discard), and then finally by an RX_FLUSH event. Firmware
		 * destroys the RXQ automatically after sending the RX_FLUSH
		 * event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER
		 * set). We want to wait for all completions, so ignore the
		 * events with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		boolean_t packed_stream)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = erp->er_mask;
	EFSYS_ASSERT(!packed_stream);
}

		void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
}

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */