#include <rte_debug.h>
#include <rte_cycles.h>
+#include <rte_alarm.h>
#include "efx.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
+#include "sfc_rx.h"
/* Initial delay when waiting for event queue init complete event */
/* Event queue init approx timeout */
#define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
+/* Management event queue polling period in microseconds */
+#define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
+
+/*
+ * Rx queue flush done event handler.
+ *
+ * Previously this event was treated as unexpected (logged via sfc_err);
+ * now it is routed to the Rx queue flush state machine.
+ *
+ * NOTE(review): two signatures appear below because the diff context
+ * collapsed the body of sfc_ev_initialized(); the body shown belongs
+ * to sfc_ev_rxq_flush_done().
+ */
static boolean_t
sfc_ev_initialized(void *arg)
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
struct sfc_evq *evq = arg;
+ struct sfc_rxq *rxq;
- sfc_err(evq->sa, "EVQ %u unexpected Rx flush done event",
- evq->evq_index);
- return B_TRUE;
+ rxq = evq->rxq;
+ /* EVQ and Rx queue must be consistently cross-linked */
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_done(rxq);
+
+ /* NOTE(review): B_FALSE presumably means "continue event processing"
+  * per the efx callback contract — confirm against efx.h */
+ return B_FALSE;
}
+/*
+ * Rx queue flush failed event handler.
+ *
+ * Previously logged as an unexpected event; now forwards the failure
+ * to the Rx queue flush state machine (mirrors the flush-done path).
+ */
static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
struct sfc_evq *evq = arg;
+ struct sfc_rxq *rxq;
- sfc_err(evq->sa, "EVQ %u unexpected Rx flush failed event",
- evq->evq_index);
- return B_TRUE;
+ rxq = evq->rxq;
+ /* EVQ and Rx queue must be consistently cross-linked */
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_failed(rxq);
+
+ return B_FALSE;
}
static boolean_t
}
+/*
+ * Link change event handler: publish the new link state to
+ * dev_data->dev_link with a single atomic 64-bit store, so readers
+ * never observe a torn (half-updated) link structure.
+ *
+ * link_mode is no longer unused — it is translated to rte_eth_link
+ * form by sfc_port_link_mode_to_info().
+ */
static boolean_t
-sfc_ev_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
+sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
struct sfc_evq *evq = arg;
+ struct sfc_adapter *sa = evq->sa;
+ struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+ struct rte_eth_link new_link;
- sfc_err(evq->sa, "EVQ %u unexpected link change",
- evq->evq_index);
- return B_TRUE;
+ /* The atomic store below is only valid if the whole struct fits
+  * in 64 bits; enforce that at compile time. */
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+
+ sfc_port_link_mode_to_info(link_mode, &new_link);
+ /* NOTE(review): the pointer casts type-pun rte_eth_link through
+  * uint64_t/rte_atomic64_t — a strict-aliasing concern, though a
+  * long-standing DPDK idiom; confirm compiler flags tolerate it. */
+ rte_atomic64_set((rte_atomic64_t *)dev_link, *(uint64_t *)&new_link);
+
+ return B_FALSE;
}
static const efx_ev_callbacks_t sfc_ev_callbacks = {
/* Poll-mode driver does not re-prime the event queue for interrupts */
}
+/*
+ * Poll the management event queue if it can be locked without waiting.
+ *
+ * trylock semantics: if another context already holds mgmt_evq_lock it
+ * is servicing (or reconfiguring) the queue, so this round is simply
+ * skipped rather than blocked on.
+ */
+void
+sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
+{
+ if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
+ struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
+
+ /* Only poll while the queue is actually started */
+ if (mgmt_evq->init_state == SFC_EVQ_STARTED)
+ sfc_ev_qpoll(mgmt_evq);
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ }
+}
+
int
sfc_ev_qprime(struct sfc_evq *evq)
{
efx_ev_qdestroy(evq->common);
}
+/*
+ * Periodic alarm callback: service the management EVQ, then re-arm
+ * the alarm for the next period (SFC_MGMT_EV_QPOLL_PERIOD_US).
+ *
+ * Failure to re-arm is treated as fatal (sfc_panic): without polling,
+ * device-level events would go unserviced and the event queue could
+ * overflow unrecoverably.
+ */
+static void
+sfc_ev_mgmt_periodic_qpoll(void *arg)
+{
+ struct sfc_adapter *sa = arg;
+ int rc;
+
+ sfc_ev_mgmt_qpoll(sa);
+
+ rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
+ sfc_ev_mgmt_periodic_qpoll, sa);
+ if (rc != 0)
+ sfc_panic(sa,
+ "cannot rearm management EVQ polling alarm (rc=%d)",
+ rc);
+}
+
+/*
+ * Start periodic management EVQ polling.  The first poll happens
+ * immediately; the callback re-arms itself thereafter.
+ */
+static void
+sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
+{
+ sfc_ev_mgmt_periodic_qpoll(sa);
+}
+
+/* Cancel the self-rearming management EVQ polling alarm */
+static void
+sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
+{
+ rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
+}
+
+/*
+ * NOTE(review): the top of this function's body is elided by the
+ * collapsed diff context (the code producing `rc` before the first
+ * check is not visible here).
+ */
int
sfc_ev_start(struct sfc_adapter *sa)
{
if (rc != 0)
goto fail_ev_init;
+ /* Start management EVQ used for global events */
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
+ if (rc != 0)
+ goto fail_mgmt_evq_start;
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ /*
+ * Start management EVQ polling. If interrupts are disabled
+ * (not used), it is required to process link status change
+ * and other device level events to avoid unrecoverable
+ * error because of event queue overflow.
+ */
+ sfc_ev_mgmt_periodic_qpoll_start(sa);
+
 /*
- * Rx/Tx event queues are started/stopped when corresponding queue
- * is started/stopped.
+ * Rx/Tx event queues are started/stopped when corresponding
+ * Rx/Tx queue is started/stopped.
 */
return 0;
+fail_mgmt_evq_start:
+ /* Error path entered with mgmt_evq_lock held — release it first */
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ efx_ev_fini(sa->nic);
+
fail_ev_init:
sfc_log_init(sa, "failed %d", rc);
return rc;
+/* NOTE(review): enclosing function signature (presumably sfc_ev_stop)
+ * is outside the visible diff context. */
sfc_log_init(sa, "entry");
+ /* Cancel the polling alarm first so no poll races with teardown */
+ sfc_ev_mgmt_periodic_qpoll_stop(sa);
+
/* Make sure that all event queues are stopped */
sw_index = sa->evq_count;
- while (sw_index-- > 0)
- sfc_ev_qstop(sa, sw_index);
+ while (sw_index-- > 0) {
+ if (sw_index == sa->mgmt_evq_index) {
+ /* Locks are required for the management EVQ */
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sfc_ev_qstop(sa, sa->mgmt_evq_index);
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ } else {
+ sfc_ev_qstop(sa, sw_index);
+ }
+ }
efx_ev_fini(sa->nic);
}
+/* NOTE(review): enclosing function signature (EVQ init/attach path)
+ * is outside the visible diff context. */
sa->evq_count = sfc_ev_qcount(sa);
sa->mgmt_evq_index = 0;
+ rte_spinlock_init(&sa->mgmt_evq_lock);
/* Allocate EVQ info array */
rc = ENOMEM;
goto fail_ev_qinit_info;
}
+ /* The management EVQ is created up front; Rx/Tx EVQs are created
+  * on demand (see comment below). */
+ rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
+ sa->socket_id);
+ if (rc != 0)
+ goto fail_mgmt_evq_init;
+
/*
* Rx/Tx event queues are created/destroyed when corresponding
* Rx/Tx queue is created/destroyed.
return 0;
+fail_mgmt_evq_init:
fail_ev_qinit_info:
while (sw_index-- > 0)
sfc_ev_qfini_info(sa, sw_index);