X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_ev.c;h=f93d30e5c24ee6b9888e83b999c8c67b163624ff;hb=a4a5cd21d20a38fc317d938b156324a6ad78d119;hp=24071b26c42087ecb705e74b8e8fe779ad49e2be;hpb=8b00f426eb6614d5c01fec95cdc271700f85f886;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 24071b26c4..f93d30e5c2 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
  * All rights reserved.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <rte_debug.h>
@@ -55,6 +33,21 @@
 /* Management event queue polling period in microseconds */
 #define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
 
+static const char *
+sfc_evq_type2str(enum sfc_evq_type type)
+{
+	switch (type) {
+	case SFC_EVQ_TYPE_MGMT:
+		return "mgmt-evq";
+	case SFC_EVQ_TYPE_RX:
+		return "rx-evq";
+	case SFC_EVQ_TYPE_TX:
+		return "tx-evq";
+	default:
+		SFC_ASSERT(B_FALSE);
+		return NULL;
+	}
+}
+
 static boolean_t
 sfc_ev_initialized(void *arg)
@@ -169,6 +162,35 @@ sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
 	return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
 }
 
+static boolean_t
+sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
+		 uint32_t pkt_count, uint16_t flags)
+{
+	struct sfc_evq *evq = arg;
+
+	sfc_err(evq->sa,
+		"EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
+		evq->evq_index, label, id, pkt_count, flags);
+	return B_TRUE;
+}
+
+/* It is not actually used on datapath, but required on RxQ flush */
+static boolean_t
+sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
+		__rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
+{
+	struct sfc_evq *evq = arg;
+	struct sfc_dp_rxq *dp_rxq;
+
+	dp_rxq = evq->dp_rxq;
+	SFC_ASSERT(dp_rxq != NULL);
+
+	if (evq->sa->dp_rx->qrx_ps_ev != NULL)
+		return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id);
+	else
+		return B_FALSE;
+}
+
 static boolean_t
 sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
 {
@@ -222,8 +244,7 @@ sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
 }
 
 static boolean_t
-sfc_ev_exception(void *arg, __rte_unused uint32_t code,
-		 __rte_unused uint32_t data)
+sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
 {
 	struct sfc_evq *evq = arg;
 
@@ -390,27 +411,11 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
 {
 	struct sfc_evq *evq = arg;
 	struct sfc_adapter *sa = evq->sa;
-	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
 	struct rte_eth_link new_link;
-	uint64_t new_link_u64;
-	uint64_t old_link_u64;
-
-	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
 
 	sfc_port_link_mode_to_info(link_mode, &new_link);
-
-	new_link_u64 = *(uint64_t *)&new_link;
-	do {
-		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
-		if (old_link_u64 == new_link_u64)
-			break;
-
-		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
-					old_link_u64, new_link_u64)) {
-			evq->sa->port.lsc_seq++;
-			break;
-		}
-	} while (B_TRUE);
+	if (rte_eth_linkstatus_set(sa->eth_dev, &new_link))
+		evq->sa->port.lsc_seq++;
 
 	return B_FALSE;
 }
@@ -418,6 +423,7 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
 static const efx_ev_callbacks_t sfc_ev_callbacks = {
 	.eec_initialized	= sfc_ev_initialized,
 	.eec_rx			= sfc_ev_nop_rx,
+	.eec_rx_ps		= sfc_ev_nop_rx_ps,
 	.eec_tx			= sfc_ev_nop_tx,
 	.eec_exception		= sfc_ev_exception,
 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
@@ -433,6 +439,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
 	.eec_initialized	= sfc_ev_initialized,
 	.eec_rx			= sfc_ev_efx_rx,
+	.eec_rx_ps		= sfc_ev_nop_rx_ps,
 	.eec_tx			= sfc_ev_nop_tx,
 	.eec_exception		= sfc_ev_exception,
 	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
@@ -448,6 +455,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
 	.eec_initialized	= sfc_ev_initialized,
 	.eec_rx			= sfc_ev_dp_rx,
+	.eec_rx_ps		= sfc_ev_dp_rx_ps,
 	.eec_tx			= sfc_ev_nop_tx,
 	.eec_exception		= sfc_ev_exception,
 	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
@@ -463,6 +471,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
 	.eec_initialized	= sfc_ev_initialized,
 	.eec_rx			= sfc_ev_nop_rx,
+	.eec_rx_ps		= sfc_ev_nop_rx_ps,
 	.eec_tx			= sfc_ev_tx,
 	.eec_exception		= sfc_ev_exception,
 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
@@ -478,6 +487,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
 	.eec_initialized	= sfc_ev_initialized,
 	.eec_rx			= sfc_ev_nop_rx,
+	.eec_rx_ps		= sfc_ev_nop_rx_ps,
 	.eec_tx			= sfc_ev_dp_tx,
 	.eec_exception		= sfc_ev_exception,
 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
@@ -551,10 +561,8 @@ void
 sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
 {
 	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
-		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
-
-		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
-			sfc_ev_qpoll(mgmt_evq);
+		if (sa->mgmt_evq_running)
+			sfc_ev_qpoll(sa->mgmt_evq);
 
 		rte_spinlock_unlock(&sa->mgmt_evq_lock);
 	}
@@ -567,29 +575,34 @@ sfc_ev_qprime(struct sfc_evq *evq)
 	return efx_ev_qprime(evq->common, evq->read_ptr);
 }
 
+/* Event queue HW index allocation scheme is described in sfc_ev.h. */
 int
-sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
 {
-	const struct sfc_evq_info *evq_info;
-	struct sfc_evq *evq;
+	struct sfc_adapter *sa = evq->sa;
 	efsys_mem_t *esmp;
+	uint32_t evq_flags = sa->evq_flags;
 	unsigned int total_delay_us;
 	unsigned int delay_us;
 	int rc;
 
-	sfc_log_init(sa, "sw_index=%u", sw_index);
+	sfc_log_init(sa, "hw_index=%u", hw_index);
 
-	evq_info = &sa->evq_info[sw_index];
-	evq = evq_info->evq;
 	esmp = &evq->mem;
 
+	evq->evq_index = hw_index;
+
 	/* Clear all events */
-	(void)memset((void *)esmp->esm_base, 0xff,
-		     EFX_EVQ_SIZE(evq_info->entries));
+	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
+
+	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
+		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+	else
+		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
 
 	/* Create the common code event queue */
-	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
-			    0 /* unused on EF10 */, 0, evq_info->flags,
+	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
+			    0 /* unused on EF10 */, 0, evq_flags,
 			    &evq->common);
 	if (rc != 0)
 		goto fail_ev_qcreate;
@@ -651,19 +664,14 @@ fail_ev_qcreate:
 }
 
 void
-sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qstop(struct sfc_evq *evq)
 {
-	const struct sfc_evq_info *evq_info;
-	struct sfc_evq *evq;
-
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	SFC_ASSERT(sw_index < sa->evq_count);
+	if (evq == NULL)
+		return;
 
-	evq_info = &sa->evq_info[sw_index];
-	evq = evq_info->evq;
+	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);
 
-	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
+	if (evq->init_state != SFC_EVQ_STARTED)
 		return;
 
 	evq->init_state = SFC_EVQ_INITIALIZED;
@@ -672,6 +680,8 @@ sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 	evq->exception = B_FALSE;
 
 	efx_ev_qdestroy(evq->common);
+
+	evq->evq_index = 0;
 }
 
 static void
@@ -718,20 +728,26 @@ sfc_ev_start(struct sfc_adapter *sa)
 		goto fail_ev_init;
 
 	/* Start management EVQ used for global events */
-	rte_spinlock_lock(&sa->mgmt_evq_lock);
 
-	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
+	/*
+	 * Management event queue start polls the queue, but it cannot
+	 * interfere with other polling contexts since mgmt_evq_running
+	 * is false yet.
+	 */
+	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
 	if (rc != 0)
 		goto fail_mgmt_evq_start;
 
+	rte_spinlock_lock(&sa->mgmt_evq_lock);
+	sa->mgmt_evq_running = true;
+	rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
 	if (sa->intr.lsc_intr) {
-		rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
+		rc = sfc_ev_qprime(sa->mgmt_evq);
 		if (rc != 0)
-			goto fail_evq0_prime;
+			goto fail_mgmt_evq_prime;
 	}
 
-	rte_spinlock_unlock(&sa->mgmt_evq_lock);
-
 	/*
 	 * Start management EVQ polling. If interrupts are disabled
 	 * (not used), it is required to process link status change
@@ -747,11 +763,10 @@
 
 	return 0;
 
-fail_evq0_prime:
-	sfc_ev_qstop(sa, 0);
+fail_mgmt_evq_prime:
+	sfc_ev_qstop(sa->mgmt_evq);
 
 fail_mgmt_evq_start:
-	rte_spinlock_unlock(&sa->mgmt_evq_lock);
 	efx_ev_fini(sa->nic);
 
 fail_ev_init:
@@ -762,108 +777,85 @@ fail_ev_init:
 void
 sfc_ev_stop(struct sfc_adapter *sa)
 {
-	unsigned int sw_index;
-
 	sfc_log_init(sa, "entry");
 
 	sfc_ev_mgmt_periodic_qpoll_stop(sa);
 
-	/* Make sure that all event queues are stopped */
-	sw_index = sa->evq_count;
-	while (sw_index-- > 0) {
-		if (sw_index == sa->mgmt_evq_index) {
-			/* Locks are required for the management EVQ */
-			rte_spinlock_lock(&sa->mgmt_evq_lock);
-			sfc_ev_qstop(sa, sa->mgmt_evq_index);
-			rte_spinlock_unlock(&sa->mgmt_evq_lock);
-		} else {
-			sfc_ev_qstop(sa, sw_index);
-		}
-	}
+	rte_spinlock_lock(&sa->mgmt_evq_lock);
+	sa->mgmt_evq_running = false;
+	rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+	sfc_ev_qstop(sa->mgmt_evq);
 
 	efx_ev_fini(sa->nic);
 }
 
 int
-sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
-	     unsigned int entries, int socket_id)
+sfc_ev_qinit(struct sfc_adapter *sa,
+	     enum sfc_evq_type type, unsigned int type_index,
+	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
 {
-	struct sfc_evq_info *evq_info;
 	struct sfc_evq *evq;
 	int rc;
 
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	evq_info = &sa->evq_info[sw_index];
+	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);
 
 	SFC_ASSERT(rte_is_power_of_2(entries));
-	SFC_ASSERT(entries <= evq_info->max_entries);
-	evq_info->entries = entries;
 
+	rc = ENOMEM;
 	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
 				 socket_id);
 	if (evq == NULL)
-		return ENOMEM;
+		goto fail_evq_alloc;
 
 	evq->sa = sa;
-	evq->evq_index = sw_index;
+	evq->type = type;
+	evq->entries = entries;
 
 	/* Allocate DMA space */
-	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
-			   socket_id, &evq->mem);
+	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
+			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
 	if (rc != 0)
-		return rc;
+		goto fail_dma_alloc;
 
 	evq->init_state = SFC_EVQ_INITIALIZED;
 
-	evq_info->evq = evq;
+	sa->evq_count++;
+
+	*evqp = evq;
 
 	return 0;
+
+fail_dma_alloc:
+	rte_free(evq);
+
+fail_evq_alloc:
+
+	sfc_log_init(sa, "failed %d", rc);
+	return rc;
 }
 
 void
-sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_ev_qfini(struct sfc_evq *evq)
 {
-	struct sfc_evq *evq;
-
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	evq = sa->evq_info[sw_index].evq;
+	struct sfc_adapter *sa = evq->sa;
 
 	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
 
-	sa->evq_info[sw_index].evq = NULL;
-
 	sfc_dma_free(sa, &evq->mem);
 
 	rte_free(evq);
-}
-
-static int
-sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
-{
-	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
-	unsigned int max_entries;
-
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	max_entries = sfc_evq_max_entries(sa, sw_index);
-	SFC_ASSERT(rte_is_power_of_2(max_entries));
-
-	evq_info->max_entries = max_entries;
-	evq_info->flags = sa->evq_flags |
-		((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
-		 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
-		 EFX_EVQ_FLAGS_NOTIFY_DISABLED);
-
-	return 0;
+
+	SFC_ASSERT(sa->evq_count > 0);
+	sa->evq_count--;
 }
 
 static int
 sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
 			       const char *value_str, void *opaque)
 {
-	uint64_t *value = opaque;
+	uint32_t *value = opaque;
 
 	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
 		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
@@ -877,19 +869,10 @@ sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
 	return 0;
 }
 
-static void
-sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
-{
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
-	/* Nothing to cleanup */
-}
-
 int
-sfc_ev_init(struct sfc_adapter *sa)
+sfc_ev_attach(struct sfc_adapter *sa)
 {
 	int rc;
-	unsigned int sw_index;
 
 	sfc_log_init(sa, "entry");
@@ -903,26 +886,11 @@
 		goto fail_kvarg_perf_profile;
 	}
 
-	sa->evq_count = sfc_ev_qcount(sa);
 	sa->mgmt_evq_index = 0;
 	rte_spinlock_init(&sa->mgmt_evq_lock);
 
-	/* Allocate EVQ info array */
-	rc = ENOMEM;
-	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
-					 sizeof(struct sfc_evq_info), 0,
-					 sa->socket_id);
-	if (sa->evq_info == NULL)
-		goto fail_evqs_alloc;
-
-	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
-		rc = sfc_ev_qinit_info(sa, sw_index);
-		if (rc != 0)
-			goto fail_ev_qinit_info;
-	}
-
-	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
-			  sa->socket_id);
+	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
+			  sa->socket_id, &sa->mgmt_evq);
 	if (rc != 0)
 		goto fail_mgmt_evq_init;
@@ -934,15 +902,6 @@
 	return 0;
 
 fail_mgmt_evq_init:
-fail_ev_qinit_info:
-	while (sw_index-- > 0)
-		sfc_ev_qfini_info(sa, sw_index);
-
-	rte_free(sa->evq_info);
-	sa->evq_info = NULL;
-
-fail_evqs_alloc:
-	sa->evq_count = 0;
 
 fail_kvarg_perf_profile:
 	sfc_log_init(sa, "failed %d", rc);
@@ -950,21 +909,13 @@ fail_kvarg_perf_profile:
 }
 
 void
-sfc_ev_fini(struct sfc_adapter *sa)
+sfc_ev_detach(struct sfc_adapter *sa)
 {
-	int sw_index;
-
 	sfc_log_init(sa, "entry");
 
-	/* Cleanup all event queues */
-	sw_index = sa->evq_count;
-	while (--sw_index >= 0) {
-		if (sa->evq_info[sw_index].evq != NULL)
-			sfc_ev_qfini(sa, sw_index);
-		sfc_ev_qfini_info(sa, sw_index);
-	}
+	sfc_ev_qfini(sa->mgmt_evq);
 
-	rte_free(sa->evq_info);
-	sa->evq_info = NULL;
-	sa->evq_count = 0;
+	if (sa->evq_count != 0)
+		sfc_err(sa, "%u EvQs are not destroyed before detach",
+			sa->evq_count);
 }
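
Note on the API reshape in the patch above: sfc_ev_qinit() now hands the queue back to the caller through an out parameter instead of stashing it in the per-adapter evq_info array, the HW event queue index is bound only between sfc_ev_qstart() and sfc_ev_qstop(), and sa->evq_count becomes a plain bookkeeping counter that sfc_ev_detach() checks for leaked queues. The self-contained C sketch below mirrors that lifecycle pattern only; the types and helper names here (struct adapter, evq_init, evq_start, ...) are simplified stand-ins invented for illustration, not the sfc driver's real definitions.

/*
 * Illustrative sketch: init -> start -> stop -> fini ownership model
 * with evq_count bookkeeping, as introduced by the patch above.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct evq {
	unsigned int entries;
	unsigned int hw_index;	/* bound on start, cleared on stop */
};

struct adapter {
	unsigned int evq_count;	/* sanity counter, checked on detach */
	struct evq *mgmt_evq;
};

/* Allocate a queue and pass ownership to the caller via *evqp. */
static int
evq_init(struct adapter *sa, unsigned int entries, struct evq **evqp)
{
	struct evq *evq = calloc(1, sizeof(*evq));

	if (evq == NULL)
		return -1;
	evq->entries = entries;
	sa->evq_count++;
	*evqp = evq;
	return 0;
}

static void
evq_start(struct evq *evq, unsigned int hw_index)
{
	evq->hw_index = hw_index;	/* HW index valid only while started */
}

static void
evq_stop(struct evq *evq)
{
	if (evq == NULL)	/* mirrors the new NULL-tolerant qstop */
		return;
	evq->hw_index = 0;
}

static void
evq_fini(struct adapter *sa, struct evq *evq)
{
	free(evq);
	assert(sa->evq_count > 0);
	sa->evq_count--;
}

int
main(void)
{
	struct adapter sa = { 0 };

	if (evq_init(&sa, 64, &sa.mgmt_evq) != 0)
		return 1;
	evq_start(sa.mgmt_evq, 0);	/* management EVQ gets HW index 0 */
	evq_stop(sa.mgmt_evq);
	evq_fini(&sa, sa.mgmt_evq);
	printf("evq_count at detach: %u\n", sa.evq_count);	/* prints 0 */
	return 0;
}

The counter exists for exactly the check sfc_ev_detach() performs in the patch: if any Rx/Tx event queue was initialized but never finalized before detach, evq_count stays non-zero and the driver can report the leak.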