X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_ev.c;h=a16dc27b380e9f54c8d3b1008f2813cdcb9c64f6;hb=8ae36890ca06bf66972f88d2f44040888ac94c6e;hp=24071b26c42087ecb705e74b8e8fe779ad49e2be;hpb=8b00f426eb6614d5c01fec95cdc271700f85f886;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 24071b26c4..a16dc27b38 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -55,6 +55,21 @@
 /* Management event queue polling period in microseconds */
 #define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
 
+static const char *
+sfc_evq_type2str(enum sfc_evq_type type)
+{
+	switch (type) {
+	case SFC_EVQ_TYPE_MGMT:
+		return "mgmt-evq";
+	case SFC_EVQ_TYPE_RX:
+		return "rx-evq";
+	case SFC_EVQ_TYPE_TX:
+		return "tx-evq";
+	default:
+		SFC_ASSERT(B_FALSE);
+		return NULL;
+	}
+}
 
 static boolean_t
 sfc_ev_initialized(void *arg)
@@ -222,8 +237,7 @@ sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
 }
 
 static boolean_t
-sfc_ev_exception(void *arg, __rte_unused uint32_t code,
-		 __rte_unused uint32_t data)
+sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
 {
 	struct sfc_evq *evq = arg;
 
@@ -551,7 +565,7 @@ void
 sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
 {
 	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
-		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
+		struct sfc_evq *mgmt_evq = sa->mgmt_evq;
 
 		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
 			sfc_ev_qpoll(mgmt_evq);
@@ -567,29 +581,34 @@ sfc_ev_qprime(struct sfc_evq *evq)
 	return efx_ev_qprime(evq->common, evq->read_ptr);
 }
 
+/* Event queue HW index allocation scheme is described in sfc_ev.h. */
 int
-sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
 {
-	const struct sfc_evq_info *evq_info;
-	struct sfc_evq *evq;
+	struct sfc_adapter *sa = evq->sa;
 	efsys_mem_t *esmp;
+	uint32_t evq_flags = sa->evq_flags;
 	unsigned int total_delay_us;
 	unsigned int delay_us;
 	int rc;
 
-	sfc_log_init(sa, "sw_index=%u", sw_index);
+	sfc_log_init(sa, "hw_index=%u", hw_index);
 
-	evq_info = &sa->evq_info[sw_index];
-	evq = evq_info->evq;
 	esmp = &evq->mem;
 
+	evq->evq_index = hw_index;
+
 	/* Clear all events */
-	(void)memset((void *)esmp->esm_base, 0xff,
-		     EFX_EVQ_SIZE(evq_info->entries));
+	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
+
+	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
+		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+	else
+		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
 
 	/* Create the common code event queue */
-	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
-			    0 /* unused on EF10 */, 0, evq_info->flags,
+	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
+			    0 /* unused on EF10 */, 0, evq_flags,
 			    &evq->common);
 	if (rc != 0)
 		goto fail_ev_qcreate;
@@ -651,19 +670,14 @@ fail_ev_qcreate:
 }
 
 void
-sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qstop(struct sfc_evq *evq)
 {
-	const struct sfc_evq_info *evq_info;
-	struct sfc_evq *evq;
-
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	SFC_ASSERT(sw_index < sa->evq_count);
+	if (evq == NULL)
+		return;
 
-	evq_info = &sa->evq_info[sw_index];
-	evq = evq_info->evq;
+	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);
 
-	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
+	if (evq->init_state != SFC_EVQ_STARTED)
 		return;
 
 	evq->init_state = SFC_EVQ_INITIALIZED;
@@ -672,6 +686,8 @@ sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 	evq->exception = B_FALSE;
 
 	efx_ev_qdestroy(evq->common);
+
+	evq->evq_index = 0;
 }
 
 static void
@@ -720,12 +736,12 @@ sfc_ev_start(struct sfc_adapter *sa)
 	/* Start management EVQ used for global events */
 	rte_spinlock_lock(&sa->mgmt_evq_lock);
 
-	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
+	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
 	if (rc != 0)
 		goto fail_mgmt_evq_start;
 
 	if (sa->intr.lsc_intr) {
-		rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
+		rc = sfc_ev_qprime(sa->mgmt_evq);
 		if (rc != 0)
 			goto fail_evq0_prime;
 	}
@@ -748,7 +764,7 @@ sfc_ev_start(struct sfc_adapter *sa)
 	return 0;
 
 fail_evq0_prime:
-	sfc_ev_qstop(sa, 0);
+	sfc_ev_qstop(sa->mgmt_evq);
 
 fail_mgmt_evq_start:
 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
@@ -762,101 +778,76 @@ fail_ev_init:
 void
 sfc_ev_stop(struct sfc_adapter *sa)
 {
-	unsigned int sw_index;
-
 	sfc_log_init(sa, "entry");
 
 	sfc_ev_mgmt_periodic_qpoll_stop(sa);
 
-	/* Make sure that all event queues are stopped */
-	sw_index = sa->evq_count;
-	while (sw_index-- > 0) {
-		if (sw_index == sa->mgmt_evq_index) {
-			/* Locks are required for the management EVQ */
-			rte_spinlock_lock(&sa->mgmt_evq_lock);
-			sfc_ev_qstop(sa, sa->mgmt_evq_index);
-			rte_spinlock_unlock(&sa->mgmt_evq_lock);
-		} else {
-			sfc_ev_qstop(sa, sw_index);
-		}
-	}
+	rte_spinlock_lock(&sa->mgmt_evq_lock);
+	sfc_ev_qstop(sa->mgmt_evq);
+	rte_spinlock_unlock(&sa->mgmt_evq_lock);
 
 	efx_ev_fini(sa->nic);
 }
 
 int
-sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
-	     unsigned int entries, int socket_id)
+sfc_ev_qinit(struct sfc_adapter *sa,
+	     enum sfc_evq_type type, unsigned int type_index,
+	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
 {
-	struct sfc_evq_info *evq_info;
 	struct sfc_evq *evq;
 	int rc;
 
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	evq_info = &sa->evq_info[sw_index];
+	sfc_log_init(sa, "type=%s type_index=%u",
+		     sfc_evq_type2str(type), type_index);
 
 	SFC_ASSERT(rte_is_power_of_2(entries));
-	SFC_ASSERT(entries <= evq_info->max_entries);
-	evq_info->entries = entries;
 
+	rc = ENOMEM;
 	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
 				 socket_id);
 	if (evq == NULL)
-		return ENOMEM;
+		goto fail_evq_alloc;
 
 	evq->sa = sa;
-	evq->evq_index = sw_index;
+	evq->type = type;
+	evq->entries = entries;
 
 	/* Allocate DMA space */
-	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
-			   socket_id, &evq->mem);
+	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
 	if (rc != 0)
-		return rc;
+		goto fail_dma_alloc;
 
 	evq->init_state = SFC_EVQ_INITIALIZED;
 
-	evq_info->evq = evq;
+	sa->evq_count++;
+
+	*evqp = evq;
 
 	return 0;
+
+fail_dma_alloc:
+	rte_free(evq);
+
+fail_evq_alloc:
+
+	sfc_log_init(sa, "failed %d", rc);
+	return rc;
 }
 
 void
-sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qfini(struct sfc_evq *evq)
 {
-	struct sfc_evq *evq;
-
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	evq = sa->evq_info[sw_index].evq;
+	struct sfc_adapter *sa = evq->sa;
 
 	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
 
-	sa->evq_info[sw_index].evq = NULL;
-
 	sfc_dma_free(sa, &evq->mem);
 
 	rte_free(evq);
-}
 
-static int
-sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
-{
-	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
-	unsigned int max_entries;
-
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	max_entries = sfc_evq_max_entries(sa, sw_index);
-	SFC_ASSERT(rte_is_power_of_2(max_entries));
-
-	evq_info->max_entries = max_entries;
-	evq_info->flags = sa->evq_flags |
-			  ((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
-				EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
-				EFX_EVQ_FLAGS_NOTIFY_DISABLED);
-
-	return 0;
+	SFC_ASSERT(sa->evq_count > 0);
+	sa->evq_count--;
 }
 
 static int
@@ -877,19 +868,10 @@ sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
 	return 0;
 }
 
-static void
-sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
-{
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	/* Nothing to cleanup */
-}
-
 int
-sfc_ev_init(struct sfc_adapter *sa)
+sfc_ev_attach(struct sfc_adapter *sa)
 {
 	int rc;
-	unsigned int sw_index;
 
 	sfc_log_init(sa, "entry");
 
@@ -903,26 +885,11 @@ sfc_ev_init(struct sfc_adapter *sa)
 		goto fail_kvarg_perf_profile;
 	}
 
-	sa->evq_count = sfc_ev_qcount(sa);
 	sa->mgmt_evq_index = 0;
 	rte_spinlock_init(&sa->mgmt_evq_lock);
 
-	/* Allocate EVQ info array */
-	rc = ENOMEM;
-	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
-					 sizeof(struct sfc_evq_info), 0,
-					 sa->socket_id);
-	if (sa->evq_info == NULL)
-		goto fail_evqs_alloc;
-
-	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
-		rc = sfc_ev_qinit_info(sa, sw_index);
-		if (rc != 0)
-			goto fail_ev_qinit_info;
-	}
-
-	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
-			  sa->socket_id);
+	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
+			  sa->socket_id, &sa->mgmt_evq);
 	if (rc != 0)
 		goto fail_mgmt_evq_init;
 
@@ -934,15 +901,6 @@ sfc_ev_init(struct sfc_adapter *sa)
 	return 0;
 
 fail_mgmt_evq_init:
-fail_ev_qinit_info:
-	while (sw_index-- > 0)
-		sfc_ev_qfini_info(sa, sw_index);
-
-	rte_free(sa->evq_info);
-	sa->evq_info = NULL;
-
-fail_evqs_alloc:
-	sa->evq_count = 0;
 
 fail_kvarg_perf_profile:
 	sfc_log_init(sa, "failed %d", rc);
@@ -950,21 +908,13 @@ fail_kvarg_perf_profile:
 }
 
 void
-sfc_ev_fini(struct sfc_adapter *sa)
+sfc_ev_detach(struct sfc_adapter *sa)
 {
-	int sw_index;
-
 	sfc_log_init(sa, "entry");
 
-	/* Cleanup all event queues */
-	sw_index = sa->evq_count;
-	while (--sw_index >= 0) {
-		if (sa->evq_info[sw_index].evq != NULL)
-			sfc_ev_qfini(sa, sw_index);
-		sfc_ev_qfini_info(sa, sw_index);
-	}
+	sfc_ev_qfini(sa->mgmt_evq);
 
-	rte_free(sa->evq_info);
-	sa->evq_info = NULL;
-	sa->evq_count = 0;
+	if (sa->evq_count != 0)
+		sfc_err(sa, "%u EvQs are not destroyed before detach",
+			sa->evq_count);
 }
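Note (not part of the patch): the rework above drops the sfc_evq_info array, so callers now hold a struct sfc_evq pointer directly and drive its whole lifecycle through sfc_ev_qinit()/sfc_ev_qstart()/sfc_ev_qstop()/sfc_ev_qfini(). The sketch below illustrates that caller-side flow under stated assumptions; the function name and the rxq_sw_index/evq_entries/hw_index/socket_id parameters are hypothetical placeholders, not code from this diff.

/*
 * Sketch only: caller-side view of the EvQ lifecycle introduced by this
 * patch. All parameters of this example function are hypothetical; only
 * the sfc_ev_q*() calls and SFC_EVQ_TYPE_RX come from the patched driver.
 */
static int
example_rx_evq_lifecycle(struct sfc_adapter *sa, unsigned int rxq_sw_index,
			 unsigned int evq_entries, unsigned int hw_index,
			 int socket_id)
{
	struct sfc_evq *evq;
	int rc;

	/* Allocate the EvQ control structure and its DMA ring */
	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, rxq_sw_index, evq_entries,
			  socket_id, &evq);
	if (rc != 0)
		return rc;

	/* Bind the EvQ to a hardware EvQ index and create it on the NIC */
	rc = sfc_ev_qstart(evq, hw_index);
	if (rc != 0)
		goto fail_ev_qstart;

	/* ... queue in use; sfc_ev_qpoll(evq) processes its events ... */

	sfc_ev_qstop(evq);	/* NULL-safe and STARTED-checked after this patch */
	sfc_ev_qfini(evq);
	return 0;

fail_ev_qstart:
	sfc_ev_qfini(evq);
	return rc;
}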