The event queue is required for device-level events (e.g. link status
change) and flush events.

Provide a thread-safe function to poll the event queue, since polling
may be done from different contexts.
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Andy Moreton <amoreton@solarflare.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
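For illustration, a minimal sketch of how the new helper might be driven
from a non-datapath context, e.g. a periodic EAL alarm callback (the
callback name, poll period and wiring below are assumptions for the
sketch, not part of this patch):

#include <rte_alarm.h>

#include "sfc.h"
#include "sfc_ev.h"

/* Assumed poll period for the sketch; not defined by this patch */
#define SFC_MGMT_EVQ_POLL_US	1000

/*
 * Hypothetical periodic callback: poll the management EVQ and re-arm.
 * It is safe to call from any context because sfc_ev_mgmt_qpoll()
 * only trylocks mgmt_evq_lock and skips the poll on contention.
 */
static void
sfc_mgmt_evq_periodic_poll(void *arg)
{
	struct sfc_adapter *sa = arg;

	sfc_ev_mgmt_qpoll(sa);

	(void)rte_eal_alarm_set(SFC_MGMT_EVQ_POLL_US,
				sfc_mgmt_evq_periodic_poll, sa);
}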
struct sfc_evq_info *evq_info;
unsigned int mgmt_evq_index;
+ rte_spinlock_t mgmt_evq_lock;
/* Poll-mode driver does not re-prime the event queue for interrupts */
}
+void
+sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
+{
+ if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
+ struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
+
+ if (mgmt_evq->init_state == SFC_EVQ_STARTED)
+ sfc_ev_qpoll(mgmt_evq);
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ }
+}
+
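Note that rte_spinlock_trylock() is used here rather than
rte_spinlock_lock(): callers from different contexts never block on the
management event queue. If another context already holds the lock, it is
polling the queue and will pick up any pending events, so this call may
simply return.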
int
sfc_ev_qprime(struct sfc_evq *evq)
{
if (rc != 0)
goto fail_ev_init;
+ /* Start management EVQ used for global events */
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
+ if (rc != 0)
+ goto fail_mgmt_evq_start;
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
- * Rx/Tx event queues are started/stopped when corresponding queue
- * is started/stopped.
+ * Rx/Tx event queues are started/stopped when corresponding
+ * Rx/Tx queue is started/stopped.
+fail_mgmt_evq_start:
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ efx_ev_fini(sa->nic);
+
fail_ev_init:
sfc_log_init(sa, "failed %d", rc);
return rc;
/* Make sure that all event queues are stopped */
sw_index = sa->evq_count;
- while (sw_index-- > 0)
- sfc_ev_qstop(sa, sw_index);
+ while (sw_index-- > 0) {
+ if (sw_index == sa->mgmt_evq_index) {
+ /* Locks are required for the management EVQ */
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sfc_ev_qstop(sa, sa->mgmt_evq_index);
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ } else {
+ sfc_ev_qstop(sa, sw_index);
+ }
+ }
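Only the management event queue is stopped under the lock: unlike the
per-queue Rx/Tx event queues, it may be polled concurrently via
sfc_ev_mgmt_qpoll(), so stopping it must be serialized against such
polls.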
sa->evq_count = sfc_ev_qcount(sa);
sa->mgmt_evq_index = 0;
+ rte_spinlock_init(&sa->mgmt_evq_lock);
/* Allocate EVQ info array */
rc = ENOMEM;
goto fail_ev_qinit_info;
}
+ rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
+ sa->socket_id);
+ if (rc != 0)
+ goto fail_mgmt_evq_init;
+
/*
* Rx/Tx event queues are created/destroyed when corresponding
* Rx/Tx queue is created/destroyed.
+fail_mgmt_evq_init:
fail_ev_qinit_info:
while (sw_index-- > 0)
sfc_ev_qfini_info(sa, sw_index);
int sfc_ev_qprime(struct sfc_evq *evq);
void sfc_ev_qpoll(struct sfc_evq *evq);
+void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);
+
#ifdef __cplusplus
}
#endif