net/sfc: fix assert in set multicast address list
[dpdk.git] drivers/net/sfc/base/ef10_ev.c
index 46ecd42..7f89a7b 100644
@@ -1,39 +1,26 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
  */
 
 #include "efx.h"
 #include "efx_impl.h"
-
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-
+#if EFSYS_OPT_MON_STATS
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_QSTATS
+#define        EFX_EV_QSTAT_INCR(_eep, _stat)                                  \
+       do {                                                            \
+               (_eep)->ee_stat[_stat]++;                               \
+       _NOTE(CONSTANTCONDITION)                                        \
+       } while (B_FALSE)
+#else
 #define        EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
 
 /*
  * Non-interrupting event queue requires interrupting event queue to
@@ -420,7 +407,12 @@ efx_mcdi_fini_evq(
        return (0);
 
 fail1:
-       EFSYS_PROBE1(fail1, efx_rc_t, rc);
+       /*
+        * EALREADY is not an error, but indicates that the MC has rebooted and
+        * that the EVQ has already been destroyed.
+        */
+       if (rc != EALREADY)
+               EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
        return (rc);
 }
@@ -447,7 +439,7 @@ ef10_ev_qcreate(
        __in            efx_nic_t *enp,
        __in            unsigned int index,
        __in            efsys_mem_t *esmp,
-       __in            size_t n,
+       __in            size_t ndescs,
        __in            uint32_t id,
        __in            uint32_t us,
        __in            uint32_t flags,
@@ -461,7 +453,8 @@ ef10_ev_qcreate(
        EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
        EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
 
-       if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+       if (!ISP2(ndescs) ||
+           (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
                rc = EINVAL;
                goto fail1;
        }
@@ -510,7 +503,8 @@ ef10_ev_qcreate(
                 * it will choose the best settings for low latency, otherwise
                 * it will choose the best settings for throughput.
                 */
-               rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
+               rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
+                   flags);
                if (rc != 0)
                        goto fail4;
        } else {
@@ -526,7 +520,7 @@ ef10_ev_qcreate(
                 * to choose it.)
                 */
                boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
-               rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
+               rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
                    low_latency);
                if (rc != 0)
                        goto fail5;
@@ -555,9 +549,10 @@ ef10_ev_qdestroy(
        efx_nic_t *enp = eep->ee_enp;
 
        EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
-           enp->en_family == EFX_FAMILY_MEDFORD);
+           enp->en_family == EFX_FAMILY_MEDFORD ||
+           enp->en_family == EFX_FAMILY_MEDFORD2);
 
-       (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
+       (void) efx_mcdi_fini_evq(enp, eep->ee_index);
 }
 
        __checkReturn   efx_rc_t
@@ -582,7 +577,7 @@ ef10_ev_qprime(
                    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
                    ERF_DD_EVQ_IND_RPTR,
                    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
-               EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+               EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
                    &dword, B_FALSE);
 
                EFX_POPULATE_DWORD_2(dword,
@@ -590,11 +585,11 @@ ef10_ev_qprime(
                    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
                    ERF_DD_EVQ_IND_RPTR,
                    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
-               EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+               EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
                    &dword, B_FALSE);
        } else {
                EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
-               EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+               EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
                    &dword, B_FALSE);
        }
 
@@ -707,13 +702,19 @@ ef10_ev_qmoderate(
                            EFE_DD_EVQ_IND_TIMER_FLAGS,
                            ERF_DD_EVQ_IND_TIMER_MODE, mode,
                            ERF_DD_EVQ_IND_TIMER_VAL, ticks);
-                       EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
+                       EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
                            eep->ee_index, &dword, 0);
                } else {
-                       EFX_POPULATE_DWORD_2(dword,
+                       /*
+                        * NOTE: The TMR_REL field introduced in Medford2 is
+                        * ignored on earlier EF10 controllers. See bug66418
+                        * comment 9 for details.
+                        */
+                       EFX_POPULATE_DWORD_3(dword,
                            ERF_DZ_TC_TIMER_MODE, mode,
-                           ERF_DZ_TC_TIMER_VAL, ticks);
-                       EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+                           ERF_DZ_TC_TIMER_VAL, ticks,
+                           ERF_FZ_TC_TMR_REL_VAL, ticks);
+                       EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
                            eep->ee_index, &dword, 0);
                }
        }
@@ -731,6 +732,117 @@ fail1:
 }
 
 
+#if EFSYS_OPT_QSTATS
+                       void
+ef10_ev_qstats_update(
+       __in                            efx_evq_t *eep,
+       __inout_ecount(EV_NQSTATS)      efsys_stat_t *stat)
+{
+       unsigned int id;
+
+       for (id = 0; id < EV_NQSTATS; id++) {
+               efsys_stat_t *essp = &stat[id];
+
+               EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+               eep->ee_stat[id] = 0;
+       }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+static __checkReturn   boolean_t
+ef10_ev_rx_packed_stream(
+       __in            efx_evq_t *eep,
+       __in            efx_qword_t *eqp,
+       __in            const efx_ev_callbacks_t *eecp,
+       __in_opt        void *arg)
+{
+       uint32_t label;
+       uint32_t pkt_count_lbits;
+       uint16_t flags;
+       boolean_t should_abort;
+       efx_evq_rxq_state_t *eersp;
+       unsigned int pkt_count;
+       unsigned int current_id;
+       boolean_t new_buffer;
+
+       pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+       label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+       new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
+
+       flags = 0;
+
+       eersp = &eep->ee_rxq_state[label];
+
+       /*
+        * RX_DSC_PTR_LBITS holds the least significant bits of the global
+        * (not per-buffer) packet counter. It is guaranteed that the
+        * maximum number of completed packets fits in the lbits mask,
+        * so modulo lbits-mask arithmetic is used to calculate the
+        * packet counter increment.
+        */
+       pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
+           EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+       eersp->eers_rx_stream_npackets += pkt_count;
+
+       if (new_buffer) {
+               flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
+#if EFSYS_OPT_RX_PACKED_STREAM
+               /*
+                * If both packed stream and equal stride super-buffer
+                * modes are compiled in, in theory credits should be
+                * maintained for packed stream only, but right now
+                * these modes are not distinguished in the event queue
+                * Rx queue state and it is OK to increment the counter
+                * regardless (it might be even cheaper than branching
+                * since neighbouring structure members are updated as well).
+                */
+               eersp->eers_rx_packed_stream_credits++;
+#endif
+               eersp->eers_rx_read_ptr++;
+       }
+       current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
+
+       /* Check for errors that invalidate checksum and L3/L4 fields */
+       if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+               /* RX frame truncated */
+               EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+               flags |= EFX_DISCARD;
+               goto deliver;
+       }
+       if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+               /* Bad Ethernet frame CRC */
+               EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+               flags |= EFX_DISCARD;
+               goto deliver;
+       }
+
+       if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+               flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
+               goto deliver;
+       }
+
+       if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
+               EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+
+       if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
+               EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+
+deliver:
+       /* If we're not discarding the packet then it is ok */
+       if (~flags & EFX_DISCARD)
+               EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+       EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
+       should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
+           flags);
+
+       return (should_abort);
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
+
 static __checkReturn   boolean_t
 ef10_ev_rx(
        __in            efx_evq_t *eep,
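
The packed stream helper above relies on the low-order-bits counter arithmetic described in its comment: only the least significant bits of the global packet counter arrive in the event, so the increment is recovered modulo the field mask. A minimal standalone sketch of that calculation follows; the 12-bit width, macro names and helper function are illustrative assumptions, not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field width; the driver uses EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS) */
#define LBITS_WIDTH	12
#define LBITS_MASK	((1u << LBITS_WIDTH) - 1)

/* Increment of the global packet counter, recovered from its low bits only */
static unsigned int
pkt_count_increment(uint32_t lbits, uint32_t npackets_so_far)
{
	/* Valid as long as the real increment fits within the mask */
	return (lbits - npackets_so_far) & LBITS_MASK;
}

int
main(void)
{
	uint32_t npackets = 4090;			/* completed so far */
	uint32_t lbits = (npackets + 10) & LBITS_MASK;	/* counter wrapped */

	/* Prints 10 even though the low bits wrapped past the mask */
	printf("increment = %u\n", pkt_count_increment(lbits, npackets));
	return 0;
}
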
@@ -763,13 +875,33 @@ ef10_ev_rx(
        label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
        eersp = &eep->ee_rxq_state[label];
 
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+       /*
+        * Packed stream events are very different,
+        * so handle them separately
+        */
+       if (eersp->eers_rx_packed_stream)
+               return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
+#endif
+
        size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+       cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
        next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
        eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
        mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
        l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
-       l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
-       cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+       /*
+        * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
+        * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
+        * and values for all EF10 controllers.
+        */
+       EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
+       EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+       EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+       EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);
+
+       l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);
 
        if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
                /* Drop this event */
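
The EFX_STATIC_ASSERT checks added above pin down at compile time that the Medford2 (FZ) field position and class values coincide with the older (DE) definitions, so a single parsing path is safe on every EF10 controller. A small self-contained sketch of the same compile-time technique, using C11 _Static_assert and made-up constants in place of the generated hardware headers:

/* Stand-ins for values that would normally come from generated headers */
#define OLD_RX_L4_CLASS_LBN	45
#define NEW_RX_L4_CLASS_LBN	45
#define OLD_L4_CLASS_TCP	1
#define NEW_L4_CLASS_TCP	1

/*
 * If a regenerated header ever moves the field or renumbers the class,
 * the build fails here instead of events being misparsed at run time.
 */
_Static_assert(OLD_RX_L4_CLASS_LBN == NEW_RX_L4_CLASS_LBN,
    "L4 class field position differs between controller generations");
_Static_assert(OLD_L4_CLASS_TCP == NEW_L4_CLASS_TCP,
    "TCP class value differs between controller generations");
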
@@ -811,8 +943,8 @@ ef10_ev_rx(
        last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
 
        /* Check for errors that invalidate checksum and L3/L4 fields */
-       if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
-               /* RX frame truncated (error flag is misnamed) */
+       if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+               /* RX frame truncated */
                EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
                flags |= EFX_DISCARD;
                goto deliver;
@@ -848,10 +980,22 @@ ef10_ev_rx(
                        flags |= EFX_CKSUM_IPV4;
                }
 
-               if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+               /*
+                * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
+                * only 2 bits wide on Medford2. Check it is safe to use the
+                * Medford2 field and values for all EF10 controllers.
+                */
+               EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
+                   ESF_DE_RX_L4_CLASS_LBN);
+               EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+               EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+               EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
+                   ESE_DE_L4_CLASS_UNKNOWN);
+
+               if (l4_class == ESE_FZ_L4_CLASS_TCP) {
                        EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
                        flags |= EFX_PKT_TCP;
-               } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+               } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
                        EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
                        flags |= EFX_PKT_UDP;
                } else {
@@ -863,10 +1007,22 @@ ef10_ev_rx(
        case ESE_DZ_L3_CLASS_IP6_FRAG:
                flags |= EFX_PKT_IPV6;
 
-               if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+               /*
+                * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
+                * only 2 bits wide on Medford2. Check it is safe to use the
+                * Medford2 field and values for all EF10 controllers.
+                */
+               EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
+                   ESF_DE_RX_L4_CLASS_LBN);
+               EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+               EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+               EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
+                   ESE_DE_L4_CLASS_UNKNOWN);
+
+               if (l4_class == ESE_FZ_L4_CLASS_TCP) {
                        EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
                        flags |= EFX_PKT_TCP;
-               } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+               } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
                        EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
                        flags |= EFX_PKT_UDP;
                } else {
@@ -1060,6 +1216,23 @@ ef10_ev_mcdi(
        }
 
        case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+               efx_mon_stat_t id;
+               efx_mon_stat_value_t value;
+               efx_rc_t rc;
+
+               /* Decode monitor stat for MCDI sensor (if supported) */
+               if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
+                       /* Report monitor stat change */
+                       should_abort = eecp->eec_monitor(arg, id, value);
+               } else if (rc == ENOTSUP) {
+                       should_abort = eecp->eec_exception(arg,
+                               EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+                               MCDI_EV_FIELD(eqp, DATA));
+               } else {
+                       EFSYS_ASSERT(rc == ENODEV);     /* Wrong port */
+               }
+#endif
                break;
        }
 
@@ -1078,6 +1251,12 @@ ef10_ev_mcdi(
                break;
 
        case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+               if (eecp->eec_mac_stats != NULL) {
+                       eecp->eec_mac_stats(arg,
+                           MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+               }
+#endif
                break;
 
        case MCDI_EVENT_CODE_FWALERT: {
@@ -1193,18 +1372,55 @@ ef10_ev_rxlabel_init(
        __in            efx_evq_t *eep,
        __in            efx_rxq_t *erp,
        __in            unsigned int label,
-       __in            boolean_t packed_stream)
+       __in            efx_rxq_type_t type)
 {
        efx_evq_rxq_state_t *eersp;
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+       boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
+       boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
+#endif
 
+       _NOTE(ARGUNUSED(type))
        EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
        eersp = &eep->ee_rxq_state[label];
 
        EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
 
+#if EFSYS_OPT_RX_PACKED_STREAM
+       /*
+        * For packed stream modes, the very first event will
+        * have the new buffer flag set, so the read pointer will be
+        * incremented, yielding the correct pointer. That results in
+        * simpler code than trying to detect the start-of-the-world
+        * condition in the event handler.
+        */
+       eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
+#else
        eersp->eers_rx_read_ptr = 0;
+#endif
        eersp->eers_rx_mask = erp->er_mask;
-       EFSYS_ASSERT(!packed_stream);
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+       eersp->eers_rx_stream_npackets = 0;
+       eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
+       if (packed_stream) {
+               eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
+                   EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
+                   EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
+               EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
+               /*
+                * A single credit is allocated to the queue when it is started.
+                * It is immediately spent by the first packet, which has the
+                * NEW BUFFER flag set; it must still be taken into account,
+                * however, so as not to accidentally wrap around the maximum
+                * number of credits.
+                */
+               eersp->eers_rx_packed_stream_credits--;
+               EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
+                   EFX_RX_PACKED_STREAM_MAX_CREDITS);
+       }
+#endif
 }
 
                void
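
The credit initialisation in ef10_ev_rxlabel_init() divides the number of event queue entries by the worst-case number of packets one credit's worth of buffer can hold, then pre-spends the credit consumed by the first NEW BUFFER event. A worked sketch with hypothetical constants (the driver takes the real values from its own headers):

#include <stdio.h>

/* Hypothetical sizes; placeholders for the EFX_RX_PACKED_STREAM_* constants */
#define EVQ_NDESCS		4096u	/* eep->ee_mask + 1 */
#define MEM_PER_CREDIT		65536u	/* buffer bytes covered by one credit */
#define MIN_PACKET_SPACE	256u	/* minimum bytes one packet occupies */

#define DIV_ROUND_UP(a, b)	(((a) + (b) - 1) / (b))

int
main(void)
{
	/* Worst-case packets (hence events) per credit-sized buffer chunk */
	unsigned int pkts_per_credit =
	    DIV_ROUND_UP(MEM_PER_CREDIT, MIN_PACKET_SPACE);	/* 256 */

	/* Credits the event queue depth can support without overflowing */
	unsigned int credits = EVQ_NDESCS / pkts_per_credit;	/* 16 */

	/* The first NEW BUFFER event spends one credit immediately */
	credits--;

	printf("initial credits = %u\n", credits);		/* 15 */
	return 0;
}
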
@@ -1221,6 +1437,13 @@ ef10_ev_rxlabel_fini(
 
        eersp->eers_rx_read_ptr = 0;
        eersp->eers_rx_mask = 0;
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
+       eersp->eers_rx_stream_npackets = 0;
+       eersp->eers_rx_packed_stream = B_FALSE;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
+       eersp->eers_rx_packed_stream_credits = 0;
+#endif
 }
 
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */