/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
* Copyright(c) 2007-2019 Solarflare Communications Inc.
*/
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
+ __in uint32_t irq,
__in efx_evq_t *eep);
static void
};
#endif /* EFX_OPTS_EF10() */
+#if EFSYS_OPT_RIVERHEAD
+/*
+ * Event queue method table for Riverhead-family NICs; selected in
+ * efx_ev_init() for EFX_FAMILY_RIVERHEAD.
+ */
+static const efx_ev_ops_t __efx_ev_rhead_ops = {
+ rhead_ev_init, /* eevo_init */
+ rhead_ev_fini, /* eevo_fini */
+ rhead_ev_qcreate, /* eevo_qcreate */
+ rhead_ev_qdestroy, /* eevo_qdestroy */
+ rhead_ev_qprime, /* eevo_qprime */
+ rhead_ev_qpost, /* eevo_qpost */
+ rhead_ev_qpoll, /* eevo_qpoll */
+ rhead_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ rhead_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_RIVERHEAD */
+
__checkReturn efx_rc_t
efx_ev_init(
break;
#endif /* EFSYS_OPT_MEDFORD2 */
+#if EFSYS_OPT_RIVERHEAD
+ case EFX_FAMILY_RIVERHEAD:
+ eevop = &__efx_ev_rhead_ops;
+ break;
+#endif /* EFSYS_OPT_RIVERHEAD */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
 __checkReturn size_t
efx_evq_size(
 __in const efx_nic_t *enp,
- __in unsigned int ndescs)
+ __in unsigned int ndescs,
+ __in uint32_t flags)
{
 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+ size_t desc_size;
+
+ desc_size = encp->enc_ev_desc_size;
+
+#if EFSYS_OPT_EV_EXTENDED_WIDTH
+ /* Extended-width event queues use a larger per-descriptor size. */
+ if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
+ desc_size = encp->enc_ev_ew_desc_size;
+#else
+ /* Callers must not request extended width when it is compiled out. */
+ EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);
+#endif
- return (ndescs * encp->enc_ev_desc_size);
+ return (ndescs * desc_size);
}
 __checkReturn unsigned int
efx_evq_nbufs(
 __in const efx_nic_t *enp,
- __in unsigned int ndescs)
+ __in unsigned int ndescs,
+ __in uint32_t flags)
{
- return (EFX_DIV_ROUND_UP(efx_evq_size(enp, ndescs), EFX_BUF_SIZE));
+ size_t size;
+
+ /* Flags are forwarded so extended-width queues are sized correctly. */
+ size = efx_evq_size(enp, ndescs, flags);
+
+ return (EFX_DIV_ROUND_UP(size, EFX_BUF_SIZE));
}
void
 __checkReturn efx_rc_t
-efx_ev_qcreate(
+efx_ev_qcreate_irq(
 __in efx_nic_t *enp,
 __in unsigned int index,
 __in efsys_mem_t *esmp,
 __in uint32_t id,
 __in uint32_t us,
 __in uint32_t flags,
+ __in uint32_t irq,
 __deref_out efx_evq_t **eepp)
{
 const efx_ev_ops_t *eevop = enp->en_eevop;
 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
 enp->en_nic_cfg.enc_evq_limit);
+ /* Validate queue index and moderation period against NIC limits. */
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
 break;
 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
 if (us != 0) {
 rc = EINVAL;
- goto fail1;
+ goto fail3;
 }
 break;
 default:
 rc = EINVAL;
- goto fail2;
+ goto fail4;
+ }
+
+ if ((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) &&
+ (encp->enc_ev_ew_desc_size == 0)) {
+ /* Extended width event descriptors are not supported. */
+ rc = EINVAL;
+ goto fail5;
 }
 EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs));
 ndescs < encp->enc_evq_min_nevs ||
 ndescs > encp->enc_evq_max_nevs) {
 rc = EINVAL;
- goto fail3;
+ goto fail6;
+ }
+
+ /*
+ * NOTE(review): this size check uses enc_ev_desc_size even when
+ * EFX_EVQ_FLAGS_EXTENDED_WIDTH is set; consider using the flag-aware
+ * efx_evq_size(enp, ndescs, flags) so EW queues are not under-checked.
+ */
+ if (EFSYS_MEM_SIZE(esmp) < (ndescs * encp->enc_ev_desc_size)) {
+ /* Buffer too small for event queue descriptors. */
+ rc = EINVAL;
+ goto fail7;
 }
 /* Allocate an EVQ object */
 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
 if (eep == NULL) {
 rc = ENOMEM;
- goto fail4;
+ goto fail8;
 }
 eep->ee_magic = EFX_EVQ_MAGIC;
 *eepp = eep;
+ /* The irq parameter is now forwarded to the per-family qcreate method. */
 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
- eep)) != 0)
- goto fail5;
+ irq, eep)) != 0)
+ goto fail9;
 return (0);
-fail5:
- EFSYS_PROBE(fail5);
+fail9:
+ EFSYS_PROBE(fail9);
 *eepp = NULL;
 enp->en_ev_qcount--;
 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
fail4:
 EFSYS_PROBE(fail4);
fail3:
 return (rc);
 }
+/*
+ * Backward-compatible event queue creation: the interrupt number
+ * defaults to the event queue index. Callers needing a different
+ * interrupt should use efx_ev_qcreate_irq() directly.
+ */
+ __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp)
+{
+ uint32_t irq = index;
+
+ return (efx_ev_qcreate_irq(enp, index, esmp, ndescs, id, us, flags,
+ irq, eepp));
+}
+
void
efx_ev_qdestroy(
__in efx_evq_t *eep)
#endif /* EFSYS_OPT_EV_PREFETCH */
+/*
+ * This method is needed to ensure that eec_initialized callback
+ * is invoked after queue creation. The callback will be invoked
+ * on Riverhead boards which have no support for INIT_DONE events
+ * and will do nothing on other boards.
+ *
+ * The client drivers must call this method after calling efx_ev_qcreate().
+ * The call must be done with the same locks being held (if any) which are
+ * normally acquired around efx_ev_qpoll() calls to ensure that
+ * eec_initialized callback is invoked within the same locking context.
+ */
+ void
+efx_ev_qcreate_check_init_done(
+ __in efx_evq_t *eep,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ const efx_nic_cfg_t *encp;
+
+ EFSYS_ASSERT(eep != NULL);
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+ EFSYS_ASSERT(eecp != NULL);
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+
+ encp = efx_nic_cfg_get(eep->ee_enp);
+
+ /* Synthesise the "initialized" event when the NIC never delivers one. */
+ if (encp->enc_evq_init_done_ev_supported == B_FALSE)
+ (void) eecp->eec_initialized(arg);
+}
+
void
efx_ev_qpoll(
__in efx_evq_t *eep,
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
+ __in uint32_t irq,
__in efx_evq_t *eep)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
_NOTE(ARGUNUSED(esmp))
- if (index >= encp->enc_evq_limit) {
+ EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);
+
+ if (irq != index) {
rc = EINVAL;
goto fail1;
}
+
#if EFSYS_OPT_RX_SCALE
if (enp->en_intr.ei_type == EFX_INTR_LINE &&
index >= EFX_MAXRSS_LEGACY) {