diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
index b4fe9a7d6a..99cae3fa3b 100644
--- a/drivers/net/sfc/base/ef10_ev.c
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -1,48 +1,17 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
  */
 
 #include "efx.h"
 #include "efx_impl.h"
-
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-
-#if EFSYS_OPT_QSTATS
-#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
-	do {								\
-		(_eep)->ee_stat[_stat]++;				\
-	_NOTE(CONSTANTCONDITION)					\
-	} while (B_FALSE)
-#else
-#define	EFX_EV_QSTAT_INCR(_eep, _stat)
+#if EFSYS_OPT_MON_STATS
+#include "mcdi_mon.h"
 #endif
 
+#if EFX_OPTS_EF10()
+
 /*
  * Non-interrupting event queue requires interrrupting event queue to
  * refer to for wake-up events even if wake ups are never used.
@@ -94,11 +63,10 @@ efx_mcdi_set_evq_tmr( __in uint32_t timer_ns) { efx_mcdi_req_t req; - uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN, - MC_CMD_SET_EVQ_TMR_OUT_LEN)]; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN, + MC_CMD_SET_EVQ_TMR_OUT_LEN); efx_rc_t rc; - (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_EVQ_TMR; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN; @@ -144,9 +112,9 @@ efx_mcdi_init_evq( __in boolean_t low_latency) { efx_mcdi_req_t req; - uint8_t payload[ - MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), - MC_CMD_INIT_EVQ_OUT_LEN)]; + EFX_MCDI_DECLARE_BUF(payload, + MC_CMD_INIT_EVQ_IN_LEN(EF10_EVQ_MAXNBUFS), + MC_CMD_INIT_EVQ_OUT_LEN); efx_qword_t *dma_addr; uint64_t addr; int npages; @@ -155,13 +123,12 @@ efx_mcdi_init_evq( int ev_cut_through; efx_rc_t rc; - npages = EFX_EVQ_NBUFS(nevs); - if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) { + npages = efx_evq_nbufs(enp, nevs); + if (npages > EF10_EVQ_MAXNBUFS) { rc = EINVAL; goto fail1; } - (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); @@ -281,9 +248,9 @@ efx_mcdi_init_evq_v2( __in uint32_t flags) { efx_mcdi_req_t req; - uint8_t payload[ - MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), - MC_CMD_INIT_EVQ_V2_OUT_LEN)]; + EFX_MCDI_DECLARE_BUF(payload, + MC_CMD_INIT_EVQ_V2_IN_LEN(EF10_EVQ_MAXNBUFS), + MC_CMD_INIT_EVQ_V2_OUT_LEN); boolean_t interrupting; unsigned int evq_type; efx_qword_t *dma_addr; @@ -292,13 +259,12 @@ efx_mcdi_init_evq_v2( int i; efx_rc_t rc; - npages = EFX_EVQ_NBUFS(nevs); - if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) { + npages = efx_evq_nbufs(enp, nevs); + if (npages > EF10_EVQ_MAXNBUFS) { rc = EINVAL; goto fail1; } - (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); @@ -405,11 +371,10 @@ efx_mcdi_fini_evq( __in uint32_t instance) { efx_mcdi_req_t req; - uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN, - MC_CMD_FINI_EVQ_OUT_LEN)]; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN, + MC_CMD_FINI_EVQ_OUT_LEN); efx_rc_t rc; - (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; @@ -428,7 +393,12 @@ efx_mcdi_fini_evq( return (0); fail1: - EFSYS_PROBE1(fail1, efx_rc_t, rc); + /* + * EALREADY is not an error, but indicates that the MC has rebooted and + * that the EVQ has already been destroyed. 
+ */ + if (rc != EALREADY) + EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } @@ -455,7 +425,7 @@ ef10_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint32_t us, __in uint32_t flags, @@ -466,22 +436,32 @@ ef10_ev_qcreate( efx_rc_t rc; _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */ - EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); - EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); - if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { + if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail1; } - if (index >= encp->enc_evq_limit) { + if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail2; } - if (us > encp->enc_evq_timer_max_us) { - rc = EINVAL; - goto fail3; + /* + * NO_CONT_EV mode is only requested from the firmware when creating + * receive queues, but here it needs to be specified at event queue + * creation, as the event handler needs to know which format is in use. + * + * If EFX_EVQ_FLAGS_NO_CONT_EV is specified, all receive queues for this + * event queue will be created in NO_CONT_EV mode. + * + * See SF-109306-TC 5.11 "Events for RXQs in NO_CONT_EV mode". + */ + if (flags & EFX_EVQ_FLAGS_NO_CONT_EV) { + if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported == B_FALSE) { + rc = EINVAL; + goto fail3; + } } /* Set up the handler table */ @@ -518,7 +498,8 @@ ef10_ev_qcreate( * it will choose the best settings for low latency, otherwise * it will choose the best settings for throughput. */ - rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags); + rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us, + flags); if (rc != 0) goto fail4; } else { @@ -534,7 +515,7 @@ ef10_ev_qcreate( * to choose it.) */ boolean_t low_latency = encp->enc_datapath_cap_evb ? 
0 : 1; - rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags, + rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags, low_latency); if (rc != 0) goto fail5; @@ -562,10 +543,9 @@ ef10_ev_qdestroy( { efx_nic_t *enp = eep->ee_enp; - EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); - (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index); + (void) efx_mcdi_fini_evq(enp, eep->ee_index); } __checkReturn efx_rc_t @@ -580,9 +560,9 @@ ef10_ev_qprime( rptr = count & eep->ee_mask; if (enp->en_nic_cfg.enc_bug35388_workaround) { - EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS > + EFX_STATIC_ASSERT(EF10_EVQ_MINNEVS > (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); - EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS < + EFX_STATIC_ASSERT(EF10_EVQ_MAXNEVS < (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_POPULATE_DWORD_2(dword, @@ -590,7 +570,7 @@ ef10_ev_qprime( EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, ERF_DD_EVQ_IND_RPTR, (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); EFX_POPULATE_DWORD_2(dword, @@ -598,11 +578,11 @@ ef10_ev_qprime( EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, ERF_DD_EVQ_IND_RPTR, rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); } else { EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); - EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); } @@ -616,8 +596,8 @@ efx_mcdi_driver_event( __in efx_qword_t data) { efx_mcdi_req_t req; - uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN, - MC_CMD_DRIVER_EVENT_OUT_LEN)]; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN, + MC_CMD_DRIVER_EVENT_OUT_LEN); efx_rc_t rc; req.emr_cmd = MC_CMD_DRIVER_EVENT; @@ -715,13 +695,19 @@ ef10_ev_qmoderate( EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, ERF_DD_EVQ_IND_TIMER_VAL, ticks); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, 0); } else { - EFX_POPULATE_DWORD_2(dword, + /* + * NOTE: The TMR_REL field introduced in Medford2 is + * ignored on earlier EF10 controllers. See bug66418 + * comment 9 for details. + */ + EFX_POPULATE_DWORD_3(dword, ERF_DZ_TC_TIMER_MODE, mode, - ERF_DZ_TC_TIMER_VAL, ticks); - EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG, + ERF_DZ_TC_TIMER_VAL, ticks, + ERF_FZ_TC_TMR_REL_VAL, ticks); + EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG, eep->ee_index, &dword, 0); } } @@ -756,6 +742,101 @@ ef10_ev_qstats_update( } #endif /* EFSYS_OPT_QSTATS */ +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + +static __checkReturn boolean_t +ef10_ev_rx_packed_stream( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + uint32_t label; + uint32_t pkt_count_lbits; + uint16_t flags; + boolean_t should_abort; + efx_evq_rxq_state_t *eersp; + unsigned int pkt_count; + unsigned int current_id; + boolean_t new_buffer; + + pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); + label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); + new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE); + + flags = 0; + + eersp = &eep->ee_rxq_state[label]; + + /* + * RX_DSC_PTR_LBITS has least significant bits of the global + * (not per-buffer) packet counter. 
It is guaranteed that
+	 * the maximum number of completed packets fits in the lbits-mask.
+	 * So, modulo lbits-mask arithmetic should be used to calculate
+	 * the packet counter increment.
+	 */
+	pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
+	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+	eersp->eers_rx_stream_npackets += pkt_count;
+
+	if (new_buffer) {
+		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
+#if EFSYS_OPT_RX_PACKED_STREAM
+		/*
+		 * If both packed stream and equal stride super-buffer
+		 * modes are compiled in, in theory credits should be
+		 * maintained for packed stream only, but right now
+		 * these modes are not distinguished in the event queue
+		 * Rx queue state and it is OK to increment the counter
+		 * regardless (it might be even cheaper than branching
+		 * since neighbouring structure members are updated as well).
+		 */
+		eersp->eers_rx_packed_stream_credits++;
+#endif
+		eersp->eers_rx_read_ptr++;
+	}
+	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
+
+	/* Check for errors that invalidate checksum and L3/L4 fields */
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+		/* RX frame truncated */
+		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+		flags |= EFX_DISCARD;
+		goto deliver;
+	}
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+		/* Bad Ethernet frame CRC */
+		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+		flags |= EFX_DISCARD;
+		goto deliver;
+	}
+
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
+		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
+		goto deliver;
+	}
+
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
+		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
+		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+
+deliver:
+	/* If we're not discarding the packet then it is ok */
+	if (~flags & EFX_DISCARD)
+		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
+	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
+	    flags);
+
+	return (should_abort);
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
+
 static	__checkReturn	boolean_t
 ef10_ev_rx(
 	__in		efx_evq_t *eep,
@@ -780,21 +861,42 @@
 	EFX_EV_QSTAT_INCR(eep, EV_RX);
 
-	/* Discard events after RXQ/TXQ errors */
-	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+	/* Discard events after RXQ/TXQ errors, or hardware not available */
+	if (enp->en_reset_flags &
+	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
 		return (B_FALSE);
 
 	/* Basic packet information */
 	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
 	eersp = &eep->ee_rxq_state[label];
 
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+	/*
+	 * Packed stream events are very different,
+	 * so handle them separately
+	 */
+	if (eersp->eers_rx_packed_stream)
+		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
+#endif
+
 	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
 	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
 	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
 	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
 	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
-	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
-	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+	/*
+	 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
Check it is safe to use the Medford2 field + * and values for all EF10 controllers. + */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN); + + l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { /* Drop this event */ @@ -818,26 +920,50 @@ ef10_ev_rx( if (mac_class == ESE_DZ_MAC_CLASS_UCAST) flags |= EFX_PKT_UNICAST; - /* Increment the count of descriptors read */ + /* + * Increment the count of descriptors read. + * + * In NO_CONT_EV mode, RX_DSC_PTR_LBITS is actually a packet count, but + * when scatter is disabled, there is only one descriptor per packet and + * so it can be treated the same. + * + * TODO: Support scatter in NO_CONT_EV mode. + */ desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) & EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); eersp->eers_rx_read_ptr += desc_count; - /* - * FIXME: add error checking to make sure this a batched event. - * This could also be an aborted scatter, see Bug36629. - */ - if (desc_count > 1) { + /* Calculate the index of the last descriptor consumed */ + last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; + + if (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV) { + if (desc_count > 1) + EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); + + /* Always read the length from the prefix in NO_CONT_EV mode. */ + flags |= EFX_PKT_PREFIX_LEN; + + /* + * Check for an aborted scatter, signalled by the ABORT bit in + * NO_CONT_EV mode. The ABORT bit was not used before NO_CONT_EV + * mode was added as it was broken in Huntington silicon. + */ + if (EFX_QWORD_FIELD(*eqp, ESF_EZ_RX_ABORT) != 0) { + flags |= EFX_DISCARD; + goto deliver; + } + } else if (desc_count > 1) { + /* + * FIXME: add error checking to make sure this a batched event. + * This could also be an aborted scatter, see Bug36629. + */ EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); flags |= EFX_PKT_PREFIX_LEN; } - /* Calculate the index of the last descriptor consumed */ - last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; - /* Check for errors that invalidate checksum and L3/L4 fields */ - if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { - /* RX frame truncated (error flag is misnamed) */ + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { + /* RX frame truncated */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; @@ -854,7 +980,7 @@ ef10_ev_rx( * or headers that are too long for the parser. * Headers and checksums must be validated by the host. */ - /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */ + EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); goto deliver; } @@ -873,10 +999,22 @@ ef10_ev_rx( flags |= EFX_CKSUM_IPV4; } - if (l4_class == ESE_DZ_L4_CLASS_TCP) { + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is + * only 2 bits wide on Medford2. Check it is safe to use the + * Medford2 field and values for all EF10 controllers. 
+ */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == + ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == + ESE_DE_L4_CLASS_UNKNOWN); + + if (l4_class == ESE_FZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_TCP; - } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { + } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_UDP; } else { @@ -888,10 +1026,22 @@ ef10_ev_rx( case ESE_DZ_L3_CLASS_IP6_FRAG: flags |= EFX_PKT_IPV6; - if (l4_class == ESE_DZ_L4_CLASS_TCP) { + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is + * only 2 bits wide on Medford2. Check it is safe to use the + * Medford2 field and values for all EF10 controllers. + */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == + ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == + ESE_DE_L4_CLASS_UNKNOWN); + + if (l4_class == ESE_FZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_TCP; - } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { + } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_UDP; } else { @@ -937,8 +1087,9 @@ ef10_ev_tx( EFX_EV_QSTAT_INCR(eep, EV_TX); - /* Discard events after RXQ/TXQ errors */ - if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) + /* Discard events after RXQ/TXQ errors, or hardware not available */ + if (enp->en_reset_flags & + (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL)) return (B_FALSE); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { @@ -1085,6 +1236,23 @@ ef10_ev_mcdi( } case MCDI_EVENT_CODE_SENSOREVT: { +#if EFSYS_OPT_MON_STATS + efx_mon_stat_t id; + efx_mon_stat_value_t value; + efx_rc_t rc; + + /* Decode monitor stat for MCDI sensor (if supported) */ + if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) { + /* Report monitor stat change */ + should_abort = eecp->eec_monitor(arg, id, value); + } else if (rc == ENOTSUP) { + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_UNKNOWN_SENSOREVT, + MCDI_EV_FIELD(eqp, DATA)); + } else { + EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ + } +#endif break; } @@ -1103,6 +1271,12 @@ ef10_ev_mcdi( break; case MCDI_EVENT_CODE_MAC_STATS_DMA: +#if EFSYS_OPT_MAC_STATS + if (eecp->eec_mac_stats != NULL) { + eecp->eec_mac_stats(arg, + MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); + } +#endif break; case MCDI_EVENT_CODE_FWALERT: { @@ -1218,18 +1392,55 @@ ef10_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label, - __in boolean_t packed_stream) + __in efx_rxq_type_t type) { efx_evq_rxq_state_t *eersp; +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM); + boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER); +#endif + _NOTE(ARGUNUSED(type)) EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); +#if EFSYS_OPT_RX_PACKED_STREAM + /* + * For packed stream modes, the very first event will + * have a new buffer flag set, so it will be incremented, + * yielding the correct pointer. 
That results in simpler
+	 * code than trying to detect a start-of-the-world condition
+	 * in the event handler.
+	 */
+	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
+#else
 	eersp->eers_rx_read_ptr = 0;
+#endif
 	eersp->eers_rx_mask = erp->er_mask;
 
-	EFSYS_ASSERT(!packed_stream);
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+	eersp->eers_rx_stream_npackets = 0;
+	eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
+	if (packed_stream) {
+		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
+		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
+			EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
+		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
+		/*
+		 * A single credit is allocated to the queue when it is started.
+		 * It is immediately spent by the first packet, which has the
+		 * NEW BUFFER flag set; still, this must be taken into account
+		 * so as not to wrap around the maximum number of credits
+		 * accidentally.
+		 */
+		eersp->eers_rx_packed_stream_credits--;
+		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
+		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
+	}
+#endif
 }
 
 void
@@ -1246,6 +1457,13 @@
 	eersp->eers_rx_read_ptr = 0;
 	eersp->eers_rx_mask = 0;
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+	eersp->eers_rx_stream_npackets = 0;
+	eersp->eers_rx_packed_stream = B_FALSE;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
+	eersp->eers_rx_packed_stream_credits = 0;
+#endif
 }
 
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFX_OPTS_EF10() */
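
Notes on recurring patterns in the patch above. The sketches below are illustrative only and are not part of the patch; any identifier not named in the diff is an assumption.

The repeated change from uint8_t payload[MAX(in, out)] plus an explicit memset to EFX_MCDI_DECLARE_BUF relies on the macro declaring a single zero-initialized buffer large enough for either the MCDI request or the response, so call sites can no longer forget the memset. A minimal sketch of such a macro, assuming MAX() as used by the old code (the real definition in efx_mcdi.h may differ, for instance by rounding the size up to a dword boundary):

/*
 * Sketch only: one zero-initialized buffer sized for the larger of
 * the MCDI request and response lengths.
 */
#define EFX_MCDI_DECLARE_BUF(_name, _in_len, _out_len)		\
	uint8_t _name[MAX((_in_len), (_out_len))] = {0}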
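
EFX_EVQ_FLAGS_NO_CONT_EV has to be chosen when the event queue is created, because the event handler must know up front which RX event format the firmware will use. A hypothetical caller (function name invented; EFX_EVQ_FLAGS_TYPE_AUTO assumed to be the default queue type) would gate the flag on the capability that ef10_ev_qcreate() checks:

#include "efx.h"	/* efx_nic_t, EFX_EVQ_FLAGS_* */

static uint32_t
hypothetical_evq_flags(const efx_nic_t *enp)
{
	uint32_t flags = EFX_EVQ_FLAGS_TYPE_AUTO;

	/* Request the NO_CONT_EV RX event format only when supported */
	if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported != B_FALSE)
		flags |= EFX_EVQ_FLAGS_NO_CONT_EV;

	return (flags);
}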
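
The RX_DSC_PTR_LBITS handling in ef10_ev_rx() and ef10_ev_rx_packed_stream() depends on unsigned modular arithmetic: the hardware reports only the low bits of a free-running counter, so subtracting the software copy and masking recovers the increment even across a wrap, provided no more than mask-many completions occur between two events. A standalone illustration with a made-up 4-bit field:

#include <stdint.h>
#include <stdio.h>

#define LBITS_MASK 0xfu	/* pretend the hardware field is 4 bits wide */

int
main(void)
{
	uint32_t sw_total = 14;				/* completions counted so far */
	uint32_t hw_lbits = (14 + 5) & LBITS_MASK;	/* field wrapped to 3 */
	/* Same shape as the driver expression: the delta survives the wrap */
	uint32_t delta = (hw_lbits - sw_total) & LBITS_MASK;

	sw_total += delta;
	printf("delta=%u total=%u\n", delta, sw_total);	/* delta=5 total=19 */
	return (0);
}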
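
The EFX_STATIC_ASSERT blocks around the ESF_FZ_RX_L4_CLASS reads are what makes it safe to use the narrower Medford2 field on every EF10 controller: if the field position or the class values ever diverged between the DE (Huntington/Medford) and FZ (Medford2) layouts, the build would fail instead of events being decoded incorrectly. The same technique in standard C11, with invented constants standing in for the register definitions:

#include <assert.h>

/* Invented stand-ins for two generations of the same event field */
#define ESF_OLD_L4_CLASS_LBN	45
#define ESF_NEW_L4_CLASS_LBN	45
#define ESE_OLD_L4_CLASS_TCP	1
#define ESE_NEW_L4_CLASS_TCP	1

/* Fail the build, not the driver, if the layouts ever diverge */
static_assert(ESF_NEW_L4_CLASS_LBN == ESF_OLD_L4_CLASS_LBN,
    "L4 class field moved between controller generations");
static_assert(ESE_NEW_L4_CLASS_TCP == ESE_OLD_L4_CLASS_TCP,
    "L4 class TCP value changed between controller generations");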
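
The packed-stream credit count set up in ef10_ev_rxlabel_init() appears to be sized so the event queue cannot overflow: one credit corresponds to a fixed amount of buffer memory, and the worst case is one event per minimally spaced packet within that memory, so dividing the queue depth by the worst-case events per credit bounds the number of credits; the single credit handed out at queue start is then subtracted. A worked example with invented constants (the real EFX_RX_PACKED_STREAM_* values live elsewhere in the library headers):

#include <stdio.h>

/* Invented values: 64 KiB of buffer per credit, packets >= 256 bytes apart */
#define MEM_PER_CREDIT		(64u * 1024u)
#define MIN_PACKET_SPACE	256u
#define DIV_ROUND_UP(_a, _b)	(((_a) + (_b) - 1u) / (_b))

int
main(void)
{
	unsigned int evq_entries = 4096;	/* eep->ee_mask + 1 */
	/* Worst case: one event per minimally spaced packet per credit */
	unsigned int per_credit = DIV_ROUND_UP(MEM_PER_CREDIT, MIN_PACKET_SPACE);
	/* Bound credits by EVQ depth; queue startup pre-spends one credit */
	unsigned int credits = evq_entries / per_credit - 1;

	printf("events/credit=%u credits=%u\n", per_credit, credits);	/* 256, 15 */
	return (0);
}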