sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
- struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
+ struct sfc_evq *mgmt_evq = sa->mgmt_evq;
if (mgmt_evq->init_state == SFC_EVQ_STARTED)
sfc_ev_qpoll(mgmt_evq);
return efx_ev_qprime(evq->common, evq->read_ptr);
}
+/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
-sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
- const struct sfc_evq_info *evq_info;
- struct sfc_evq *evq;
+ struct sfc_adapter *sa = evq->sa;
efsys_mem_t *esmp;
uint32_t evq_flags = sa->evq_flags;
unsigned int total_delay_us;
unsigned int delay_us;
int rc;
- sfc_log_init(sa, "sw_index=%u", sw_index);
+ sfc_log_init(sa, "hw_index=%u", hw_index);
- evq_info = &sa->evq_info[sw_index];
- evq = evq_info->evq;
esmp = &evq->mem;
+ evq->evq_index = hw_index;
+
/* Clear all events */
(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
- if (sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index)
+ if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
else
evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
/* Create the common code event queue */
- rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq->entries,
+ rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
0 /* unused on EF10 */, 0, evq_flags,
&evq->common);
if (rc != 0)
}
void
-sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qstop(struct sfc_evq *evq)
{
- const struct sfc_evq_info *evq_info;
- struct sfc_evq *evq;
-
- sfc_log_init(sa, "sw_index=%u", sw_index);
-
- SFC_ASSERT(sw_index < sa->evq_count);
+ if (evq == NULL)
+ return;
- evq_info = &sa->evq_info[sw_index];
- evq = evq_info->evq;
+ sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);
- if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
+ if (evq->init_state != SFC_EVQ_STARTED)
return;
evq->init_state = SFC_EVQ_INITIALIZED;
evq->exception = B_FALSE;
efx_ev_qdestroy(evq->common);
+
+ evq->evq_index = 0;
}
static void
/* Start management EVQ used for global events */
rte_spinlock_lock(&sa->mgmt_evq_lock);
- rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
+ rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
if (rc != 0)
goto fail_mgmt_evq_start;
if (sa->intr.lsc_intr) {
- rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
+ rc = sfc_ev_qprime(sa->mgmt_evq);
if (rc != 0)
goto fail_evq0_prime;
}
return 0;
fail_evq0_prime:
- sfc_ev_qstop(sa, 0);
+ sfc_ev_qstop(sa->mgmt_evq);
fail_mgmt_evq_start:
rte_spinlock_unlock(&sa->mgmt_evq_lock);
void
sfc_ev_stop(struct sfc_adapter *sa)
{
- unsigned int sw_index;
-
sfc_log_init(sa, "entry");
sfc_ev_mgmt_periodic_qpoll_stop(sa);
- /* Make sure that all event queues are stopped */
- sw_index = sa->evq_count;
- while (sw_index-- > 0) {
- if (sw_index == sa->mgmt_evq_index) {
- /* Locks are required for the management EVQ */
- rte_spinlock_lock(&sa->mgmt_evq_lock);
- sfc_ev_qstop(sa, sa->mgmt_evq_index);
- rte_spinlock_unlock(&sa->mgmt_evq_lock);
- } else {
- sfc_ev_qstop(sa, sw_index);
- }
- }
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sfc_ev_qstop(sa->mgmt_evq);
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
efx_ev_fini(sa->nic);
}
int
-sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+sfc_ev_qinit(struct sfc_adapter *sa,
enum sfc_evq_type type, unsigned int type_index,
- unsigned int entries, int socket_id)
+ unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
- struct sfc_evq_info *evq_info;
struct sfc_evq *evq;
int rc;
- sfc_log_init(sa, "sw_index=%u type=%s type_index=%u",
- sw_index, sfc_evq_type2str(type), type_index);
-
- evq_info = &sa->evq_info[sw_index];
+ sfc_log_init(sa, "type=%s type_index=%u",
+ sfc_evq_type2str(type), type_index);
SFC_ASSERT(rte_is_power_of_2(entries));
goto fail_evq_alloc;
evq->sa = sa;
- evq->evq_index = sw_index;
evq->type = type;
evq->entries = entries;
evq->init_state = SFC_EVQ_INITIALIZED;
- evq_info->evq = evq;
+ sa->evq_count++;
+
+ *evqp = evq;
return 0;
}
void
-sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qfini(struct sfc_evq *evq)
{
- struct sfc_evq *evq;
-
- sfc_log_init(sa, "sw_index=%u", sw_index);
-
- evq = sa->evq_info[sw_index].evq;
+ struct sfc_adapter *sa = evq->sa;
SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
- sa->evq_info[sw_index].evq = NULL;
-
sfc_dma_free(sa, &evq->mem);
rte_free(evq);
-}
-
-static int
-sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
-{
- sfc_log_init(sa, "sw_index=%u", sw_index);
- return 0;
+ SFC_ASSERT(sa->evq_count > 0);
+ sa->evq_count--;
}
static int
return 0;
}
-static void
-sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
-{
- sfc_log_init(sa, "sw_index=%u", sw_index);
-
- /* Nothing to cleanup */
-}
-
int
sfc_ev_init(struct sfc_adapter *sa)
{
int rc;
- unsigned int sw_index;
sfc_log_init(sa, "entry");
goto fail_kvarg_perf_profile;
}
- sa->evq_count = sfc_ev_qcount(sa);
sa->mgmt_evq_index = 0;
rte_spinlock_init(&sa->mgmt_evq_lock);
- /* Allocate EVQ info array */
- rc = ENOMEM;
- sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
- sizeof(struct sfc_evq_info), 0,
- sa->socket_id);
- if (sa->evq_info == NULL)
- goto fail_evqs_alloc;
-
- for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
- rc = sfc_ev_qinit_info(sa, sw_index);
- if (rc != 0)
- goto fail_ev_qinit_info;
- }
-
- rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_EVQ_TYPE_MGMT, 0,
- SFC_MGMT_EVQ_ENTRIES, sa->socket_id);
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
+ sa->socket_id, &sa->mgmt_evq);
if (rc != 0)
goto fail_mgmt_evq_init;
return 0;
fail_mgmt_evq_init:
-fail_ev_qinit_info:
- while (sw_index-- > 0)
- sfc_ev_qfini_info(sa, sw_index);
-
- rte_free(sa->evq_info);
- sa->evq_info = NULL;
-
-fail_evqs_alloc:
- sa->evq_count = 0;
fail_kvarg_perf_profile:
sfc_log_init(sa, "failed %d", rc);
void
sfc_ev_fini(struct sfc_adapter *sa)
{
- int sw_index;
-
sfc_log_init(sa, "entry");
- /* Cleanup all event queues */
- sw_index = sa->evq_count;
- while (--sw_index >= 0) {
- if (sa->evq_info[sw_index].evq != NULL)
- sfc_ev_qfini(sa, sw_index);
- sfc_ev_qfini_info(sa, sw_index);
- }
+ sfc_ev_qfini(sa->mgmt_evq);
- rte_free(sa->evq_info);
- sa->evq_info = NULL;
- sa->evq_count = 0;
+ if (sa->evq_count != 0)
+ sfc_err(sa, "%u EvQs are not destroyed before detach",
+ sa->evq_count);
}
unsigned int entries;
};
-struct sfc_evq_info {
- /* NUMA-aware EVQ data structure used on datapath */
- struct sfc_evq *evq;
-};
-
/*
* Functions below define event queue to transmit/receive queue and vice
* versa mapping.
+ * A dedicated event queue is allocated for management and for each Rx and Tx queue.
+ * Event queue 0 is used for management events.
+ * Rx event queues from 1 up to the number of Rx queues follow the management event queue.
+ * Tx event queues follow the Rx event queues.
*/
-static inline unsigned int
-sfc_ev_qcount(struct sfc_adapter *sa)
-{
- const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
-
- /*
- * One management EVQ for global events.
- * Own EVQ for each Tx and Rx queue.
- */
- return 1 + dev_data->nb_rx_queues + dev_data->nb_tx_queues;
-}
-
static inline unsigned int
sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
unsigned int rxq_sw_index)
int sfc_ev_start(struct sfc_adapter *sa);
void sfc_ev_stop(struct sfc_adapter *sa);
-int sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+int sfc_ev_qinit(struct sfc_adapter *sa,
enum sfc_evq_type type, unsigned int type_index,
- unsigned int entries, int socket_id);
-void sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index);
-int sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index);
-void sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+ unsigned int entries, int socket_id, struct sfc_evq **evqp);
+void sfc_ev_qfini(struct sfc_evq *evq);
+int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index);
+void sfc_ev_qstop(struct sfc_evq *evq);
int sfc_ev_qprime(struct sfc_evq *evq);
void sfc_ev_qpoll(struct sfc_evq *evq);
evq = rxq->evq;
- rc = sfc_ev_qstart(sa, evq->evq_index);
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
if (rc != 0)
goto fail_ev_qstart;
sfc_rx_qflush(sa, sw_index);
fail_rx_qcreate:
- sfc_ev_qstop(sa, evq->evq_index);
+ sfc_ev_qstop(evq);
fail_ev_qstart:
return rc;
efx_rx_qdestroy(rxq->common);
- sfc_ev_qstop(sa, rxq->evq->evq_index);
+ sfc_ev_qstop(rxq->evq);
}
static int
int rc;
uint16_t buf_size;
struct sfc_rxq_info *rxq_info;
- unsigned int evq_index;
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;
- evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
-
- rc = sfc_ev_qinit(sa, evq_index, SFC_EVQ_TYPE_RX, sw_index,
- rxq_info->entries, socket_id);
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
+ rxq_info->entries, socket_id, &evq);
if (rc != 0)
goto fail_ev_qinit;
- evq = sa->evq_info[evq_index].evq;
-
rc = ENOMEM;
rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
socket_id);
rte_free(rxq);
fail_rxq_alloc:
- sfc_ev_qfini(sa, evq_index);
+ sfc_ev_qfini(evq);
fail_ev_qinit:
rxq_info->entries = 0;
rxq_info->entries = 0;
sfc_dma_free(sa, &rxq->mem);
- rte_free(rxq);
- sfc_ev_qfini(sa, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+ sfc_ev_qfini(rxq->evq);
+ rxq->evq = NULL;
+
+ rte_free(rxq);
}
#if EFSYS_OPT_RX_SCALE
struct sfc_txq_info *txq_info;
struct sfc_evq *evq;
struct sfc_txq *txq;
- unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);
int rc = 0;
struct sfc_dp_tx_qcreate_info info;
SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
txq_info->entries = nb_tx_desc;
- rc = sfc_ev_qinit(sa, evq_index, SFC_EVQ_TYPE_TX, sw_index,
- txq_info->entries, socket_id);
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
+ txq_info->entries, socket_id, &evq);
if (rc != 0)
goto fail_ev_qinit;
- evq = sa->evq_info[evq_index].evq;
-
rc = ENOMEM;
txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
if (txq == NULL)
rte_free(txq);
fail_txq_alloc:
- sfc_ev_qfini(sa, evq_index);
+ sfc_ev_qfini(evq);
fail_ev_qinit:
txq_info->entries = 0;
txq_info->entries = 0;
sfc_dma_free(sa, &txq->mem);
- rte_free(txq);
- sfc_ev_qfini(sa, sfc_evq_index_by_txq_sw_index(sa, sw_index));
+ sfc_ev_qfini(txq->evq);
+ txq->evq = NULL;
+
+ rte_free(txq);
}
static int
evq = txq->evq;
- rc = sfc_ev_qstart(sa, evq->evq_index);
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
if (rc != 0)
goto fail_ev_qstart;
efx_tx_qdestroy(txq->common);
fail_tx_qcreate:
- sfc_ev_qstop(sa, evq->evq_index);
+ sfc_ev_qstop(evq);
fail_ev_qstart:
return rc;
efx_tx_qdestroy(txq->common);
- sfc_ev_qstop(sa, txq->evq->evq_index);
+ sfc_ev_qstop(txq->evq);
/*
* It seems to be used by DPDK for debug purposes only ('rte_ether')