1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2012-2019 Solarflare Communications Inc.
9 #if EFSYS_OPT_MON_STATS
16 * Non-interrupting event queue requires interrupting event queue to
17 * refer to for wake-up events even if wake ups are never used.
18 * It could be even non-allocated event queue.
20 #define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
22 static __checkReturn boolean_t
25 __in efx_qword_t *eqp,
26 __in const efx_ev_callbacks_t *eecp,
29 static __checkReturn boolean_t
32 __in efx_qword_t *eqp,
33 __in const efx_ev_callbacks_t *eecp,
36 static __checkReturn boolean_t
39 __in efx_qword_t *eqp,
40 __in const efx_ev_callbacks_t *eecp,
43 static __checkReturn boolean_t
46 __in efx_qword_t *eqp,
47 __in const efx_ev_callbacks_t *eecp,
50 static __checkReturn boolean_t
53 __in efx_qword_t *eqp,
54 __in const efx_ev_callbacks_t *eecp,
/*
 * Program the EVQ moderation timer via an MC_CMD_SET_EVQ_TMR MCDI request.
 * The same timer_ns interval is written as both the initial load and the
 * reload value; the timer mode is passed through unchanged.
 * (NOTE(review): this chunk is missing lines — the function name line,
 * error paths and return are not visible here.)
 */
58 static __checkReturn efx_rc_t
61 __in uint32_t instance,
63 __in uint32_t timer_ns)
66 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
67 MC_CMD_SET_EVQ_TMR_OUT_LEN);
/* Request and response share the one payload buffer */
70 req.emr_cmd = MC_CMD_SET_EVQ_TMR;
71 req.emr_in_buf = payload;
72 req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
73 req.emr_out_buf = payload;
74 req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
76 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
77 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
78 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
79 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
81 efx_mcdi_execute(enp, &req);
/* Fail if the MC reported an error or returned a short response */
83 if (req.emr_rc != 0) {
88 if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
98 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Event module init/fini fragments: enp is deliberately unused in both.
 * NOTE(review): no per-NIC event-module state appears to be set up or
 * torn down here — confirm against the complete source.
 */
104 __checkReturn efx_rc_t
108 _NOTE(ARGUNUSED(enp))
116 _NOTE(ARGUNUSED(enp))
/*
 * Create an EF10 event queue: validate NO_CONT_EV support, install the
 * per-event-type handler table, resolve the interrupt/notify policy and
 * issue INIT_EVQ (v2 where supported, otherwise v1).
 * (NOTE(review): lines are missing from this chunk — parameters such as
 * ndescs/us/flags and the error labels are declared in unseen lines.)
 */
119 __checkReturn efx_rc_t
122 __in unsigned int index,
123 __in efsys_mem_t *esmp,
130 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
134 _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
137 * NO_CONT_EV mode is only requested from the firmware when creating
138 * receive queues, but here it needs to be specified at event queue
139 * creation, as the event handler needs to know which format is in use.
141 * If EFX_EVQ_FLAGS_NO_CONT_EV is specified, all receive queues for this
142 * event queue will be created in NO_CONT_EV mode.
144 * See SF-109306-TC 5.11 "Events for RXQs in NO_CONT_EV mode".
/* Reject NO_CONT_EV if the firmware does not support it */
146 if (flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
147 if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported == B_FALSE) {
153 /* Set up the handler table */
154 eep->ee_rx = ef10_ev_rx;
155 eep->ee_tx = ef10_ev_tx;
156 eep->ee_driver = ef10_ev_driver;
157 eep->ee_drv_gen = ef10_ev_drv_gen;
158 eep->ee_mcdi = ef10_ev_mcdi;
160 /* Set up the event queue */
161 /* INIT_EVQ expects function-relative vector number */
162 if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
163 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
/*
 * The always-interrupting queue is forced to interrupt mode so that
 * non-interrupting queues have a queue to refer to for wake-ups.
 */
165 } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
167 flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
168 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
170 irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
174 * Interrupts may be raised for events immediately after the queue is
175 * created. See bug58606.
178 if (encp->enc_init_evq_v2_supported) {
180 * On Medford the low latency license is required to enable RX
181 * and event cut through and to disable RX batching. If event
182 * queue type in flags is auto, we let the firmware decide the
183 * settings to use. If the adapter has a low latency license,
184 * it will choose the best settings for low latency, otherwise
185 * it will choose the best settings for throughput.
187 rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
193 * On Huntington we need to specify the settings to use.
194 * If event queue type in flags is auto, we favour throughput
195 * if the adapter is running virtualization supporting firmware
196 * (i.e. the full featured firmware variant)
197 * and latency otherwise. The Ethernet Virtual Bridging
198 * capability is used to make this decision. (Note though that
199 * the low latency firmware variant is also best for
200 * throughput and corresponding type should be specified
/* EVB capability present implies virtualized use: favour throughput */
203 boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
204 rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
217 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy an event queue: issue FINI_EVQ to the MC. The result is
 * deliberately ignored — the queue is being torn down regardless.
 */
226 efx_nic_t *enp = eep->ee_enp;
228 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
230 (void) efx_mcdi_fini_evq(enp, eep->ee_index);
/*
 * Acknowledge consumed events by writing the EVQ read pointer (rptr).
 * With the bug35388 workaround the rptr does not fit in one indirect
 * doorbell field, so it is written as two halves (high bits first,
 * then low bits) through ER_DD_EVQ_INDIRECT; otherwise a single write
 * to ER_DZ_EVQ_RPTR_REG suffices.
 */
233 __checkReturn efx_rc_t
236 __in unsigned int count)
238 efx_nic_t *enp = eep->ee_enp;
/* Wrap the caller's running count into the ring */
242 rptr = count & eep->ee_mask;
244 if (enp->en_nic_cfg.enc_bug35388_workaround) {
/* Queue size must need more than one, but at most two, half-writes */
245 EFX_STATIC_ASSERT(EF10_EVQ_MINNEVS >
246 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
247 EFX_STATIC_ASSERT(EF10_EVQ_MAXNEVS <
248 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
250 EFX_POPULATE_DWORD_2(dword,
251 ERF_DD_EVQ_IND_RPTR_FLAGS,
252 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
254 (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
255 EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
258 EFX_POPULATE_DWORD_2(dword,
259 ERF_DD_EVQ_IND_RPTR_FLAGS,
260 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
262 rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
263 EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
/* No workaround needed: single direct rptr write */
266 EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
267 EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
/*
 * Ask the MC to post a driver-generated event (64-bit payload, split
 * into low/high dwords) onto the given event queue via
 * MC_CMD_DRIVER_EVENT.
 * (NOTE(review): lines are missing here — the enp/evq parameters and
 * the error-path lines are not visible in this chunk.)
 */
274 static __checkReturn efx_rc_t
275 efx_mcdi_driver_event(
278 __in efx_qword_t data)
281 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
282 MC_CMD_DRIVER_EVENT_OUT_LEN);
285 req.emr_cmd = MC_CMD_DRIVER_EVENT;
286 req.emr_in_buf = payload;
287 req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
288 req.emr_out_buf = payload;
289 req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
291 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
/* 64-bit event data is carried as two 32-bit MCDI fields */
293 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
294 EFX_QWORD_FIELD(data, EFX_DWORD_0));
295 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
296 EFX_QWORD_FIELD(data, EFX_DWORD_1));
298 efx_mcdi_execute(enp, &req);
300 if (req.emr_rc != 0) {
308 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Post a software (driver-generated) event on this queue. The event is
 * built with DRV_GEN_EV sub-code and delivered via the MC; any MCDI
 * failure is deliberately ignored (best-effort notification).
 */
318 efx_nic_t *enp = eep->ee_enp;
321 EFX_POPULATE_QWORD_3(event,
322 ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
323 ESF_DZ_DRV_SUB_CODE, 0,
324 ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
326 (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
/*
 * Set the event queue moderation interval (us). us == 0 disables the
 * timer. Depending on workarounds, the timer is programmed either via
 * MCDI (bug61265), via the indirect doorbell (bug35388), or directly
 * through ER_DZ_EVQ_TMR_REG.
 */
329 __checkReturn efx_rc_t
332 __in unsigned int us)
334 efx_nic_t *enp = eep->ee_enp;
335 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
340 /* Check that hardware and MCDI use the same timer MODE values */
341 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
342 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
343 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
344 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
345 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
346 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
347 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
348 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
/* Reject intervals beyond the hardware maximum */
350 if (us > encp->enc_evq_timer_max_us) {
355 /* If the value is zero then disable the timer */
357 mode = FFE_CZ_TIMER_MODE_DIS;
359 mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
/* bug61265: the timer must be programmed through MCDI, in ns */
362 if (encp->enc_bug61265_workaround) {
363 uint32_t ns = us * 1000;
365 rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
/* Direct register writes take the interval in timer ticks */
371 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
374 if (encp->enc_bug35388_workaround) {
375 EFX_POPULATE_DWORD_3(dword,
376 ERF_DD_EVQ_IND_TIMER_FLAGS,
377 EFE_DD_EVQ_IND_TIMER_FLAGS,
378 ERF_DD_EVQ_IND_TIMER_MODE, mode,
379 ERF_DD_EVQ_IND_TIMER_VAL, ticks);
380 EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
381 eep->ee_index, &dword, 0);
384 * NOTE: The TMR_REL field introduced in Medford2 is
385 * ignored on earlier EF10 controllers. See bug66418
386 * comment 9 for details.
388 EFX_POPULATE_DWORD_3(dword,
389 ERF_DZ_TC_TIMER_MODE, mode,
390 ERF_DZ_TC_TIMER_VAL, ticks,
391 ERF_FZ_TC_TMR_REL_VAL, ticks);
392 EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
393 eep->ee_index, &dword, 0);
404 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fold the per-queue event statistics into the caller's array and
 * reset the queue's counters to zero (read-and-clear semantics).
 */
412 ef10_ev_qstats_update(
414 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
418 for (id = 0; id < EV_NQSTATS; id++) {
419 efsys_stat_t *essp = &stat[id];
421 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
422 eep->ee_stat[id] = 0;
425 #endif /* EFSYS_OPT_QSTATS */
427 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
/*
 * Handle an RX event for a queue in packed-stream (or equal-stride
 * super-buffer) mode: derive the completed-packet count from the
 * truncated global packet counter, track buffer rotation/credits,
 * decode error flags, and deliver via the eec_rx_ps callback.
 */
429 static __checkReturn boolean_t
430 ef10_ev_rx_packed_stream(
432 __in efx_qword_t *eqp,
433 __in const efx_ev_callbacks_t *eecp,
437 uint32_t pkt_count_lbits;
439 boolean_t should_abort;
440 efx_evq_rxq_state_t *eersp;
441 unsigned int pkt_count;
442 unsigned int current_id;
443 boolean_t new_buffer;
445 pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
446 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
447 new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
451 eersp = &eep->ee_rxq_state[label];
454 * RX_DSC_PTR_LBITS has least significant bits of the global
455 * (not per-buffer) packet counter. It is guaranteed that
456 * maximum number of completed packets fits in lbits-mask.
457 * So, modulo lbits-mask arithmetic should be used to calculate
458 * packet counter increment.
460 pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
461 EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
462 eersp->eers_rx_stream_npackets += pkt_count;
465 flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
466 #if EFSYS_OPT_RX_PACKED_STREAM
468 * If both packed stream and equal stride super-buffer
469 * modes are compiled in, in theory credits should
470 * be maintained for packed stream only, but right now
471 * these modes are not distinguished in the event queue
472 * Rx queue state and it is OK to increment the counter
473 * regardless (it might be even cheaper than branching
474 * since neighbour structure members are updated as well).
476 eersp->eers_rx_packed_stream_credits++;
478 eersp->eers_rx_read_ptr++;
480 current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
482 /* Check for errors that invalidate checksum and L3/L4 fields */
483 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
484 /* RX frame truncated */
485 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
486 flags |= EFX_DISCARD;
489 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
490 /* Bad Ethernet frame CRC */
491 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
492 flags |= EFX_DISCARD;
/* Parse failure: host must validate headers/checksums itself */
496 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
497 EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
498 flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
/* Checksum errors are counted but do not discard the packet here */
502 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
503 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
505 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
506 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
509 /* If we're not discarding the packet then it is ok */
510 if (~flags & EFX_DISCARD)
511 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
513 EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
514 should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
517 return (should_abort);
520 #endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
/*
 * Handle a normal (non-packed-stream) RX completion event: decode the
 * packet size, queue label, descriptor-pointer low bits and class
 * fields; set discard/VLAN/L3/L4/checksum flags; advance the queue's
 * read pointer; and deliver the completion via the eec_rx callback.
 * Returns the callback's should_abort indication.
 */
522 static __checkReturn boolean_t
525 __in efx_qword_t *eqp,
526 __in const efx_ev_callbacks_t *eecp,
529 efx_nic_t *enp = eep->ee_enp;
533 uint32_t eth_tag_class;
536 uint32_t next_read_lbits;
539 boolean_t should_abort;
540 efx_evq_rxq_state_t *eersp;
541 unsigned int desc_count;
542 unsigned int last_used_id;
544 EFX_EV_QSTAT_INCR(eep, EV_RX);
546 /* Discard events after RXQ/TXQ errors, or hardware not available */
547 if (enp->en_reset_flags &
548 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
551 /* Basic packet information */
552 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
553 eersp = &eep->ee_rxq_state[label];
555 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
557 * Packed stream events are very different,
558 * so handle them separately
560 if (eersp->eers_rx_packed_stream)
561 return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
564 size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
565 cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
566 next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
567 eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
568 mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
569 l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
572 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
573 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
574 * and values for all EF10 controllers.
576 EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
577 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
578 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
579 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);
581 l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);
583 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
584 /* Drop this event */
591 * This may be part of a scattered frame, or it may be a
592 * truncated frame if scatter is disabled on this RXQ.
593 * Overlength frames can be received if e.g. a VF is configured
594 * for 1500 MTU but connected to a port set to 9000 MTU
596 * FIXME: There is not yet any driver that supports scatter on
597 * Huntington. Scatter support is required for OSX.
599 flags |= EFX_PKT_CONT;
602 if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
603 flags |= EFX_PKT_UNICAST;
606 * Increment the count of descriptors read.
608 * In NO_CONT_EV mode, RX_DSC_PTR_LBITS is actually a packet count, but
609 * when scatter is disabled, there is only one descriptor per packet and
610 * so it can be treated the same.
612 * TODO: Support scatter in NO_CONT_EV mode.
614 desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
615 EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
616 eersp->eers_rx_read_ptr += desc_count;
618 /* Calculate the index of the last descriptor consumed */
619 last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
621 if (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
623 EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
625 /* Always read the length from the prefix in NO_CONT_EV mode. */
626 flags |= EFX_PKT_PREFIX_LEN;
629 * Check for an aborted scatter, signalled by the ABORT bit in
630 * NO_CONT_EV mode. The ABORT bit was not used before NO_CONT_EV
631 * mode was added as it was broken in Huntington silicon.
633 if (EFX_QWORD_FIELD(*eqp, ESF_EZ_RX_ABORT) != 0) {
634 flags |= EFX_DISCARD;
637 } else if (desc_count > 1) {
639 * FIXME: add error checking to make sure this a batched event.
640 * This could also be an aborted scatter, see Bug36629.
642 EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
643 flags |= EFX_PKT_PREFIX_LEN;
646 /* Check for errors that invalidate checksum and L3/L4 fields */
647 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
648 /* RX frame truncated */
649 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
650 flags |= EFX_DISCARD;
653 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
654 /* Bad Ethernet frame CRC */
655 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
656 flags |= EFX_DISCARD;
659 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
661 * Hardware parse failed, due to malformed headers
662 * or headers that are too long for the parser.
663 * Headers and checksums must be validated by the host.
665 EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
669 if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
670 (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
671 flags |= EFX_PKT_VLAN_TAGGED;
/* Classify L3/L4 and validate checksums accordingly */
675 case ESE_DZ_L3_CLASS_IP4:
676 case ESE_DZ_L3_CLASS_IP4_FRAG:
677 flags |= EFX_PKT_IPV4;
678 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
679 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
681 flags |= EFX_CKSUM_IPV4;
685 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
686 * only 2 bits wide on Medford2. Check it is safe to use the
687 * Medford2 field and values for all EF10 controllers.
689 EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
690 ESF_DE_RX_L4_CLASS_LBN);
691 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
692 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
693 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
694 ESE_DE_L4_CLASS_UNKNOWN);
696 if (l4_class == ESE_FZ_L4_CLASS_TCP) {
697 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
698 flags |= EFX_PKT_TCP;
699 } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
700 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
701 flags |= EFX_PKT_UDP;
703 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
707 case ESE_DZ_L3_CLASS_IP6:
708 case ESE_DZ_L3_CLASS_IP6_FRAG:
709 flags |= EFX_PKT_IPV6;
712 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
713 * only 2 bits wide on Medford2. Check it is safe to use the
714 * Medford2 field and values for all EF10 controllers.
716 EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
717 ESF_DE_RX_L4_CLASS_LBN);
718 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
719 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
720 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
721 ESE_DE_L4_CLASS_UNKNOWN);
723 if (l4_class == ESE_FZ_L4_CLASS_TCP) {
724 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
725 flags |= EFX_PKT_TCP;
726 } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
727 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
728 flags |= EFX_PKT_UDP;
730 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
735 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
/* L4 checksum is only meaningful for TCP/UDP packets */
739 if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
740 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
741 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
743 flags |= EFX_CKSUM_TCPUDP;
748 /* If we're not discarding the packet then it is ok */
749 if (~flags & EFX_DISCARD)
750 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
752 EFSYS_ASSERT(eecp->eec_rx != NULL);
753 should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
755 return (should_abort);
/*
 * Handle a TX completion event: extract the descriptor index and queue
 * label and forward them to the eec_tx callback. Events are dropped
 * after RXQ/TXQ errors or when the hardware is unavailable.
 */
758 static __checkReturn boolean_t
761 __in efx_qword_t *eqp,
762 __in const efx_ev_callbacks_t *eecp,
765 efx_nic_t *enp = eep->ee_enp;
768 boolean_t should_abort;
770 EFX_EV_QSTAT_INCR(eep, EV_TX);
772 /* Discard events after RXQ/TXQ errors, or hardware not available */
773 if (enp->en_reset_flags &
774 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
777 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
778 /* Drop this event */
782 /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
783 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
784 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
786 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
788 EFSYS_ASSERT(eecp->eec_tx != NULL);
789 should_abort = eecp->eec_tx(arg, label, id);
791 return (should_abort);
/*
 * Handle a driver event: dispatch on the DRV_SUB_CODE to the matching
 * callback (timer, wake-up, start-up); unknown sub-codes are traced as
 * bad events.
 */
794 static __checkReturn boolean_t
797 __in efx_qword_t *eqp,
798 __in const efx_ev_callbacks_t *eecp,
802 boolean_t should_abort;
804 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
805 should_abort = B_FALSE;
807 code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
809 case ESE_DZ_DRV_TIMER_EV: {
812 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
814 EFSYS_ASSERT(eecp->eec_timer != NULL);
815 should_abort = eecp->eec_timer(arg, id);
819 case ESE_DZ_DRV_WAKE_UP_EV: {
822 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
824 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
825 should_abort = eecp->eec_wake_up(arg, id);
829 case ESE_DZ_DRV_START_UP_EV:
830 EFSYS_ASSERT(eecp->eec_initialized != NULL);
831 should_abort = eecp->eec_initialized(arg);
/* Unknown driver sub-code: trace both event halves for diagnosis */
835 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
836 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
837 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
841 return (should_abort);
/*
 * Handle a driver-generated (software) event: the payload must fit in
 * 16 bits; larger values are traced as bad events, otherwise the value
 * is delivered to the eec_software callback.
 */
844 static __checkReturn boolean_t
847 __in efx_qword_t *eqp,
848 __in const efx_ev_callbacks_t *eecp,
852 boolean_t should_abort;
854 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
855 should_abort = B_FALSE;
857 data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
/* Software events carry at most 16 bits of payload */
858 if (data >= ((uint32_t)1 << 16)) {
859 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
860 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
861 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
866 EFSYS_ASSERT(eecp->eec_software != NULL);
867 should_abort = eecp->eec_software(arg, (uint16_t)data);
869 return (should_abort);
/*
 * Handle an MCDI event: dispatch on MCDI_EVENT_CODE to the appropriate
 * handler — command completion, proxy auth, link change, sensors, MC
 * reboot/assert, MAC stats DMA, firmware alerts, and RXQ/TXQ error and
 * flush notifications.
 */
872 static __checkReturn boolean_t
875 __in efx_qword_t *eqp,
876 __in const efx_ev_callbacks_t *eecp,
879 efx_nic_t *enp = eep->ee_enp;
881 boolean_t should_abort = B_FALSE;
883 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
885 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
887 case MCDI_EVENT_CODE_BADSSERT:
888 efx_mcdi_ev_death(enp, EINTR);
891 case MCDI_EVENT_CODE_CMDDONE:
893 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
894 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
895 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
898 #if EFSYS_OPT_MCDI_PROXY_AUTH
899 case MCDI_EVENT_CODE_PROXY_RESPONSE:
901 * This event notifies a function that an authorization request
902 * has been processed. If the request was authorized then the
903 * function can now re-send the original MCDI request.
904 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
906 efx_mcdi_ev_proxy_response(enp,
907 MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
908 MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
910 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
912 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
913 case MCDI_EVENT_CODE_PROXY_REQUEST:
914 efx_mcdi_ev_proxy_request(enp,
915 MCDI_EV_FIELD(eqp, PROXY_REQUEST_BUFF_INDEX));
917 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
919 case MCDI_EVENT_CODE_LINKCHANGE: {
920 efx_link_mode_t link_mode;
922 ef10_phy_link_ev(enp, eqp, &link_mode);
923 should_abort = eecp->eec_link_change(arg, link_mode);
927 case MCDI_EVENT_CODE_SENSOREVT: {
928 #if EFSYS_OPT_MON_STATS
930 efx_mon_stat_value_t value;
933 /* Decode monitor stat for MCDI sensor (if supported) */
934 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
935 /* Report monitor stat change */
936 should_abort = eecp->eec_monitor(arg, id, value);
937 } else if (rc == ENOTSUP) {
938 should_abort = eecp->eec_exception(arg,
939 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
940 MCDI_EV_FIELD(eqp, DATA));
942 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
948 case MCDI_EVENT_CODE_SCHEDERR:
949 /* Informational only */
952 case MCDI_EVENT_CODE_REBOOT:
953 /* Falcon/Siena only (should not be seen with Huntington). */
954 efx_mcdi_ev_death(enp, EIO);
957 case MCDI_EVENT_CODE_MC_REBOOT:
958 /* MC_REBOOT event is used for Huntington (EF10) and later. */
959 efx_mcdi_ev_death(enp, EIO);
962 case MCDI_EVENT_CODE_MAC_STATS_DMA:
963 #if EFSYS_OPT_MAC_STATS
964 if (eecp->eec_mac_stats != NULL) {
965 eecp->eec_mac_stats(arg,
966 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
971 case MCDI_EVENT_CODE_FWALERT: {
972 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
974 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
975 should_abort = eecp->eec_exception(arg,
976 EFX_EXCEPTION_FWALERT_SRAM,
977 MCDI_EV_FIELD(eqp, FWALERT_DATA));
979 should_abort = eecp->eec_exception(arg,
980 EFX_EXCEPTION_UNKNOWN_FWALERT,
981 MCDI_EV_FIELD(eqp, DATA));
985 case MCDI_EVENT_CODE_TX_ERR: {
987 * After a TXQ error is detected, firmware sends a TX_ERR event.
988 * This may be followed by TX completions (which we discard),
989 * and then finally by a TX_FLUSH event. Firmware destroys the
990 * TXQ automatically after sending the TX_FLUSH event.
992 enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
994 EFSYS_PROBE2(tx_descq_err,
995 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
996 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
998 /* Inform the driver that a reset is required. */
999 eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
1000 MCDI_EV_FIELD(eqp, TX_ERR_DATA));
1004 case MCDI_EVENT_CODE_TX_FLUSH: {
1005 uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
1008 * EF10 firmware sends two TX_FLUSH events: one to the txq's
1009 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
1010 * We want to wait for all completions, so ignore the events
1011 * with TX_FLUSH_TO_DRIVER.
1013 if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
1014 should_abort = B_FALSE;
1018 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
1020 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
1022 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
1023 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
1027 case MCDI_EVENT_CODE_RX_ERR: {
1029 * After an RXQ error is detected, firmware sends an RX_ERR
1030 * event. This may be followed by RX events (which we discard),
1031 * and then finally by an RX_FLUSH event. Firmware destroys the
1032 * RXQ automatically after sending the RX_FLUSH event.
1034 enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
1036 EFSYS_PROBE2(rx_descq_err,
1037 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1038 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1040 /* Inform the driver that a reset is required. */
1041 eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
1042 MCDI_EV_FIELD(eqp, RX_ERR_DATA));
1046 case MCDI_EVENT_CODE_RX_FLUSH: {
1047 uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
1050 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
1051 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
1052 * We want to wait for all completions, so ignore the events
1053 * with RX_FLUSH_TO_DRIVER.
1055 if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
1056 should_abort = B_FALSE;
1060 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
1062 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
1064 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
1065 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
/* Unrecognised MCDI event code: trace both event halves */
1070 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1071 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1072 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1076 return (should_abort);
/*
 * Bind an RXQ to an event queue label: initialise the per-label RX
 * state (read pointer, mask, packed-stream bookkeeping and credits).
 * The label must be unused (mask asserted zero) on entry.
 */
1080 ef10_ev_rxlabel_init(
1081 __in efx_evq_t *eep,
1082 __in efx_rxq_t *erp,
1083 __in unsigned int label,
1084 __in efx_rxq_type_t type)
1086 efx_evq_rxq_state_t *eersp;
1087 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
1088 boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
1089 boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
1092 _NOTE(ARGUNUSED(type))
1093 EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1094 eersp = &eep->ee_rxq_state[label];
1096 EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
1098 #if EFSYS_OPT_RX_PACKED_STREAM
1100 * For packed stream modes, the very first event will
1101 * have a new buffer flag set, so it will be incremented,
1102 * yielding the correct pointer. That results in a simpler
1103 * code than trying to detect start-of-the-world condition
1104 * in the event handler.
1106 eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
1108 eersp->eers_rx_read_ptr = 0;
1110 eersp->eers_rx_mask = erp->er_mask;
1111 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
1112 eersp->eers_rx_stream_npackets = 0;
1113 eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
1115 #if EFSYS_OPT_RX_PACKED_STREAM
1116 if (packed_stream) {
/* Credits = EVQ entries divided by events per credit-sized buffer */
1117 eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
1118 EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
1119 EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
1120 EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
1122 * A single credit is allocated to the queue when it is started.
1123 * It is immediately spent by the first packet which has NEW
1124 * BUFFER flag set, though, but still we shall take into
1125 * account, as to not wrap around the maximum number of credits
1128 eersp->eers_rx_packed_stream_credits--;
1129 EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
1130 EFX_RX_PACKED_STREAM_MAX_CREDITS);
/*
 * Unbind an RXQ from an event queue label: reset the per-label RX
 * state to zero. The label must currently be in use (mask non-zero).
 */
1136 ef10_ev_rxlabel_fini(
1137 __in efx_evq_t *eep,
1138 __in unsigned int label)
1140 efx_evq_rxq_state_t *eersp;
1142 EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1143 eersp = &eep->ee_rxq_state[label];
1145 EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
1147 eersp->eers_rx_read_ptr = 0;
1148 eersp->eers_rx_mask = 0;
1149 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
1150 eersp->eers_rx_stream_npackets = 0;
1151 eersp->eers_rx_packed_stream = B_FALSE;
1153 #if EFSYS_OPT_RX_PACKED_STREAM
1154 eersp->eers_rx_packed_stream_credits = 0;
1158 #endif /* EFX_OPTS_EF10() */