From 9a75f75cb1f2432a9e735656fca167fecb057338 Mon Sep 17 00:00:00 2001
From: Andrew Rybchenko
Date: Tue, 29 Nov 2016 16:19:11 +0000
Subject: [PATCH] net/sfc: maintain management event queue

The event queue is required for device-level events (e.g. link status
change) and flush events.

Provide a thread-safe function to poll the event queue, since polling
may be done from different contexts.

Signed-off-by: Andrew Rybchenko
Reviewed-by: Andy Moreton
Reviewed-by: Ferruh Yigit
---
 drivers/net/sfc/sfc.h    |  1 +
 drivers/net/sfc/sfc_ev.c | 49 ++++++++++++++++++++++++++++++++++++----
 drivers/net/sfc/sfc_ev.h |  2 ++
 3 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index f6d0b81b6f..d56fa540e5 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -147,6 +147,7 @@ struct sfc_adapter {
 	struct sfc_evq_info		*evq_info;
 
 	unsigned int			mgmt_evq_index;
+	rte_spinlock_t			mgmt_evq_lock;
 };
 
 /*
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 42009c0f7e..a6a1ee22a8 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -215,6 +215,19 @@ sfc_ev_qpoll(struct sfc_evq *evq)
 	/* Poll-mode driver does not re-prime the event queue for interrupts */
 }
 
+void
+sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
+{
+	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
+		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
+
+		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
+			sfc_ev_qpoll(mgmt_evq);
+
+		rte_spinlock_unlock(&sa->mgmt_evq_lock);
+	}
+}
+
 int
 sfc_ev_qprime(struct sfc_evq *evq)
 {
@@ -326,13 +339,26 @@ sfc_ev_start(struct sfc_adapter *sa)
 	if (rc != 0)
 		goto fail_ev_init;
 
+	/* Start management EVQ used for global events */
+	rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
+	if (rc != 0)
+		goto fail_mgmt_evq_start;
+
+	rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
 	/*
-	 * Rx/Tx event queues are started/stopped when corresponding queue
-	 * is started/stopped.
+	 * Rx/Tx event queues are started/stopped when corresponding
+	 * Rx/Tx queue is started/stopped.
 	 */
 
 	return 0;
 
+fail_mgmt_evq_start:
+	rte_spinlock_unlock(&sa->mgmt_evq_lock);
+	efx_ev_fini(sa->nic);
+
 fail_ev_init:
 	sfc_log_init(sa, "failed %d", rc);
 	return rc;
@@ -347,8 +373,16 @@ sfc_ev_stop(struct sfc_adapter *sa)
 
 	/* Make sure that all event queues are stopped */
 	sw_index = sa->evq_count;
-	while (sw_index-- > 0)
-		sfc_ev_qstop(sa, sw_index);
+	while (sw_index-- > 0) {
+		if (sw_index == sa->mgmt_evq_index) {
+			/* Locks are required for the management EVQ */
+			rte_spinlock_lock(&sa->mgmt_evq_lock);
+			sfc_ev_qstop(sa, sa->mgmt_evq_index);
+			rte_spinlock_unlock(&sa->mgmt_evq_lock);
+		} else {
+			sfc_ev_qstop(sa, sw_index);
+		}
+	}
 
 	efx_ev_fini(sa->nic);
 }
@@ -442,6 +476,7 @@ sfc_ev_init(struct sfc_adapter *sa)
 
 	sa->evq_count = sfc_ev_qcount(sa);
 	sa->mgmt_evq_index = 0;
+	rte_spinlock_init(&sa->mgmt_evq_lock);
 
 	/* Allocate EVQ info array */
 	rc = ENOMEM;
@@ -457,6 +492,11 @@ sfc_ev_init(struct sfc_adapter *sa)
 			goto fail_ev_qinit_info;
 	}
 
+	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
+			  sa->socket_id);
+	if (rc != 0)
+		goto fail_mgmt_evq_init;
+
 	/*
 	 * Rx/Tx event queues are created/destroyed when corresponding
 	 * Rx/Tx queue is created/destroyed.
@@ -464,6 +504,7 @@ sfc_ev_init(struct sfc_adapter *sa)
 
 	return 0;
 
+fail_mgmt_evq_init:
 fail_ev_qinit_info:
 	while (sw_index-- > 0)
 		sfc_ev_qfini_info(sa, sw_index);
diff --git a/drivers/net/sfc/sfc_ev.h b/drivers/net/sfc/sfc_ev.h
index 11478e90a1..8455fdae96 100644
--- a/drivers/net/sfc/sfc_ev.h
+++ b/drivers/net/sfc/sfc_ev.h
@@ -133,6 +133,8 @@ void sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index);
 int sfc_ev_qprime(struct sfc_evq *evq);
 void sfc_ev_qpoll(struct sfc_evq *evq);
 
+void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.20.1
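
Note: this patch only adds the management EVQ and the thread-safe
sfc_ev_mgmt_qpoll() entry point; it does not add a caller. As a minimal
sketch of how such a caller might look, the snippet below drives the
poll periodically from DPDK's one-shot alarm (interrupt thread) context
using the real rte_eal_alarm_set() API. The callback name and the
polling period are illustrative assumptions, not part of this patch:

#include <rte_alarm.h>

#include "sfc.h"
#include "sfc_ev.h"

/* Hypothetical polling period; not defined by this patch */
#define MGMT_EVQ_POLL_PERIOD_US	100000	/* 100 ms */

static void
mgmt_evq_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;

	/*
	 * Safe to race with other contexts: sfc_ev_mgmt_qpoll() takes
	 * mgmt_evq_lock with a trylock and simply skips this poll if
	 * another context already holds the lock.
	 */
	sfc_ev_mgmt_qpoll(sa);

	/* EAL alarms are one-shot, so re-arm for the next poll */
	(void)rte_eal_alarm_set(MGMT_EVQ_POLL_PERIOD_US,
				mgmt_evq_periodic_qpoll, sa);
}

The trylock is the key design point: a periodic caller like the one
above never blocks a concurrent control-path caller, and vice versa; a
contended poll is merely skipped and retried on the next period.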