net/sfc: use NIC EVQ descs limits instead of defines
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index b29eb2f..939766d 100644
@@ -1,32 +1,10 @@
-/*-
- *   BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
  * All rights reserved.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <rte_debug.h>
@@ -180,8 +158,37 @@ sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
        dp_rxq = evq->dp_rxq;
        SFC_ASSERT(dp_rxq != NULL);
 
-       SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
-       return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
+       SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
+       return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id);
+}
+
+static boolean_t
+sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
+                uint32_t pkt_count, uint16_t flags)
+{
+       struct sfc_evq *evq = arg;
+
+       sfc_err(evq->sa,
+               "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
+               evq->evq_index, label, id, pkt_count, flags);
+       return B_TRUE;
+}
+
+/* It is not actually used on datapath, but required on RxQ flush */
+static boolean_t
+sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
+               __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
+{
+       struct sfc_evq *evq = arg;
+       struct sfc_dp_rxq *dp_rxq;
+
+       dp_rxq = evq->dp_rxq;
+       SFC_ASSERT(dp_rxq != NULL);
+
+       if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL)
+               return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id);
+       else
+               return B_FALSE;
 }
 
 static boolean_t
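Note: the new eec_rx_ps callbacks follow the same split as the existing eec_rx pair. The nop variant logs an unexpected packed-stream Rx event and returns B_TRUE (which libefx treats as a request to stop processing the queue), while the datapath variant defers to the Rx datapath's qrx_ps_ev op when it exists; the op is optional because it is only needed to consume events seen during RxQ flush, hence the B_FALSE fallback. A minimal sketch of that optional-op dispatch, with illustrative types rather than the driver's sfc_dp_rx definition:

#include <stdbool.h>
#include <stddef.h>

struct dp_rx_sketch {
	/* Optional packed-stream event op; NULL when the datapath has none */
	bool (*qrx_ps_ev)(void *dp_rxq, unsigned int id);
};

static bool
rx_ps_event_sketch(const struct dp_rx_sketch *dp_rx, void *dp_rxq,
		   unsigned int id)
{
	/*
	 * An RxQ flush may deliver a packed-stream event even when the
	 * datapath does not implement the op; in that case treat the
	 * event as consumed (keep polling) rather than as an error.
	 */
	return (dp_rx->qrx_ps_ev != NULL) ? dp_rx->qrx_ps_ev(dp_rxq, id) : false;
}
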
@@ -232,8 +239,8 @@ sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
        dp_txq = evq->dp_txq;
        SFC_ASSERT(dp_txq != NULL);
 
-       SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
-       return evq->sa->dp_tx->qtx_ev(dp_txq, id);
+       SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
+       return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id);
 }
 
 static boolean_t
@@ -286,7 +293,9 @@ sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
        SFC_ASSERT(rxq != NULL);
        SFC_ASSERT(rxq->hw_index == rxq_hw_index);
        SFC_ASSERT(rxq->evq == evq);
-       sfc_rx_qflush_done(rxq);
+       RTE_SET_USED(rxq);
+
+       sfc_rx_qflush_done(sfc_rxq_info_by_dp_rxq(dp_rxq));
 
        return B_FALSE;
 }
@@ -315,7 +324,9 @@ sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
        SFC_ASSERT(rxq != NULL);
        SFC_ASSERT(rxq->hw_index == rxq_hw_index);
        SFC_ASSERT(rxq->evq == evq);
-       sfc_rx_qflush_failed(rxq);
+       RTE_SET_USED(rxq);
+
+       sfc_rx_qflush_failed(sfc_rxq_info_by_dp_rxq(dp_rxq));
 
        return B_FALSE;
 }
@@ -344,7 +355,9 @@ sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
        SFC_ASSERT(txq != NULL);
        SFC_ASSERT(txq->hw_index == txq_hw_index);
        SFC_ASSERT(txq->evq == evq);
-       sfc_tx_qflush_done(txq);
+       RTE_SET_USED(txq);
+
+       sfc_tx_qflush_done(sfc_txq_info_by_dp_txq(dp_txq));
 
        return B_FALSE;
 }
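Note: in the flush handlers the queue pointer is now used only by the assertions; completion is reported against the control-path info looked up with sfc_rxq_info_by_dp_rxq()/sfc_txq_info_by_dp_txq(), and RTE_SET_USED() keeps builds where the asserts compile away free of unused-variable warnings. A small sketch of that pattern (the assert macro is a stand-in for SFC_ASSERT; RTE_SET_USED is the standard rte_common.h macro):

#include <stdlib.h>
#include <rte_common.h>		/* RTE_SET_USED(x) expands to (void)(x) */

#ifdef SKETCH_ENABLE_ASSERT
#define SKETCH_ASSERT(exp)	do { if (!(exp)) abort(); } while (0)
#else
#define SKETCH_ASSERT(exp)	do { } while (0)	/* asserts compiled out */
#endif

static void
flush_done_sketch(void *queue)
{
	void *rxq = queue;

	SKETCH_ASSERT(rxq != NULL);
	/* 'rxq' is only read by the assert above; mark it used so builds
	 * without asserts do not emit -Wunused-variable. */
	RTE_SET_USED(rxq);

	/* ... report flush completion via the control-path structure ... */
}
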
@@ -404,27 +417,11 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
 {
        struct sfc_evq *evq = arg;
        struct sfc_adapter *sa = evq->sa;
-       struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
        struct rte_eth_link new_link;
-       uint64_t new_link_u64;
-       uint64_t old_link_u64;
-
-       EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
 
        sfc_port_link_mode_to_info(link_mode, &new_link);
-
-       new_link_u64 = *(uint64_t *)&new_link;
-       do {
-               old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
-               if (old_link_u64 == new_link_u64)
-                       break;
-
-               if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
-                                       old_link_u64, new_link_u64)) {
-                       evq->sa->port.lsc_seq++;
-                       break;
-               }
-       } while (B_TRUE);
+       if (rte_eth_linkstatus_set(sa->eth_dev, &new_link))
+               evq->sa->port.lsc_seq++;
 
        return B_FALSE;
 }
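Note: the open-coded rte_atomic64_cmpset() retry loop is replaced by the ethdev helper rte_eth_linkstatus_set(), which atomically replaces dev->data->dev_link and signals through its return value whether the stored link status changed, so the driver can bump port.lsc_seq without its own loop. An illustration of the mechanism the helper encapsulates (not the ethdev implementation itself):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* The link description fits in 64 bits, so it can be published atomically. */
struct link_sketch {
	uint32_t speed;
	uint16_t duplex;
	uint16_t status;
};

static bool
publish_link_sketch(_Atomic uint64_t *dev_link, const struct link_sketch *new_link)
{
	uint64_t new_val = 0;
	uint64_t old_val;

	memcpy(&new_val, new_link, sizeof(*new_link));
	old_val = atomic_exchange(dev_link, new_val);

	return old_val != new_val;	/* true when the link word changed */
}
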
@@ -432,6 +429,7 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
 static const efx_ev_callbacks_t sfc_ev_callbacks = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_nop_rx,
+       .eec_rx_ps              = sfc_ev_nop_rx_ps,
        .eec_tx                 = sfc_ev_nop_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_nop_rxq_flush_done,
@@ -447,6 +445,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_efx_rx,
+       .eec_rx_ps              = sfc_ev_nop_rx_ps,
        .eec_tx                 = sfc_ev_nop_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_rxq_flush_done,
@@ -462,6 +461,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_dp_rx,
+       .eec_rx_ps              = sfc_ev_dp_rx_ps,
        .eec_tx                 = sfc_ev_nop_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_rxq_flush_done,
@@ -477,6 +477,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_nop_rx,
+       .eec_rx_ps              = sfc_ev_nop_rx_ps,
        .eec_tx                 = sfc_ev_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_nop_rxq_flush_done,
@@ -492,6 +493,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
        .eec_initialized        = sfc_ev_initialized,
        .eec_rx                 = sfc_ev_nop_rx,
+       .eec_rx_ps              = sfc_ev_nop_rx_ps,
        .eec_tx                 = sfc_ev_dp_tx,
        .eec_exception          = sfc_ev_exception,
        .eec_rxq_flush_done     = sfc_ev_nop_rxq_flush_done,
@@ -565,10 +567,8 @@ void
 sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
 {
        if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
-               struct sfc_evq *mgmt_evq = sa->mgmt_evq;
-
-               if (mgmt_evq->init_state == SFC_EVQ_STARTED)
-                       sfc_ev_qpoll(mgmt_evq);
+               if (sa->mgmt_evq_running)
+                       sfc_ev_qpoll(sa->mgmt_evq);
 
                rte_spinlock_unlock(&sa->mgmt_evq_lock);
        }
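Note: sfc_ev_mgmt_qpoll() now gates polling on the mgmt_evq_running flag instead of the queue's init_state, which lets queue start/stop run outside the spinlock (see the sfc_ev_start()/sfc_ev_stop() hunks below). A sketch of the trylock-plus-flag pattern with generic names:

#include <stdbool.h>
#include <rte_spinlock.h>

struct mgmt_evq_sketch {
	rte_spinlock_t	lock;		/* serializes management EVQ pollers */
	bool		running;	/* written under 'lock'; poll only when true */
};

static void
mgmt_qpoll_sketch(struct mgmt_evq_sketch *m)
{
	/* Opportunistic poll: never block if another context holds the lock */
	if (rte_spinlock_trylock(&m->lock)) {
		if (m->running) {
			/* ... poll the management event queue here ... */
		}
		rte_spinlock_unlock(&m->lock);
	}
}
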
@@ -615,12 +615,14 @@ sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
 
        SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
        if (evq->dp_rxq != 0) {
-               if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+               if (strcmp(sa->priv.dp_rx->dp.name,
+                          SFC_KVARG_DATAPATH_EFX) == 0)
                        evq->callbacks = &sfc_ev_callbacks_efx_rx;
                else
                        evq->callbacks = &sfc_ev_callbacks_dp_rx;
        } else if (evq->dp_txq != 0) {
-               if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+               if (strcmp(sa->priv.dp_tx->dp.name,
+                          SFC_KVARG_DATAPATH_EFX) == 0)
                        evq->callbacks = &sfc_ev_callbacks_efx_tx;
                else
                        evq->callbacks = &sfc_ev_callbacks_dp_tx;
@@ -734,20 +736,26 @@ sfc_ev_start(struct sfc_adapter *sa)
                goto fail_ev_init;
 
        /* Start management EVQ used for global events */
-       rte_spinlock_lock(&sa->mgmt_evq_lock);
 
+       /*
+        * Management event queue start polls the queue, but it cannot
+        * interfere with other polling contexts since mgmt_evq_running
+        * is false yet.
+        */
        rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
        if (rc != 0)
                goto fail_mgmt_evq_start;
 
+       rte_spinlock_lock(&sa->mgmt_evq_lock);
+       sa->mgmt_evq_running = true;
+       rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
        if (sa->intr.lsc_intr) {
                rc = sfc_ev_qprime(sa->mgmt_evq);
                if (rc != 0)
                        goto fail_mgmt_evq_prime;
        }
 
-       rte_spinlock_unlock(&sa->mgmt_evq_lock);
-
        /*
         * Start management EVQ polling. If interrupts are disabled
         * (not used), it is required to process link status change
@@ -767,7 +775,6 @@ fail_mgmt_evq_prime:
        sfc_ev_qstop(sa->mgmt_evq);
 
 fail_mgmt_evq_start:
-       rte_spinlock_unlock(&sa->mgmt_evq_lock);
        efx_ev_fini(sa->nic);
 
 fail_ev_init:
@@ -783,9 +790,11 @@ sfc_ev_stop(struct sfc_adapter *sa)
        sfc_ev_mgmt_periodic_qpoll_stop(sa);
 
        rte_spinlock_lock(&sa->mgmt_evq_lock);
-       sfc_ev_qstop(sa->mgmt_evq);
+       sa->mgmt_evq_running = false;
        rte_spinlock_unlock(&sa->mgmt_evq_lock);
 
+       sfc_ev_qstop(sa->mgmt_evq);
+
        efx_ev_fini(sa->nic);
 }
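Note: taken together with the hunks above, the spinlock now protects only the mgmt_evq_running flag. sfc_ev_start() starts the queue first (its internal polling cannot race with other contexts because the flag is still false) and then publishes the flag under the lock; sfc_ev_stop() clears the flag under the lock and stops the queue afterwards. A sketch of that ordering with generic names (queue start/stop details elided):

#include <stdbool.h>
#include <rte_spinlock.h>

struct mgmt_evq_sketch2 {		/* same shape as the sketch above */
	rte_spinlock_t	lock;
	bool		running;
};

static void
mgmt_evq_start_sketch(struct mgmt_evq_sketch2 *m)
{
	/* Starting the queue may poll it internally; that cannot interfere
	 * with other pollers because 'running' is still false here. */
	/* ... start the hardware event queue ... */

	rte_spinlock_lock(&m->lock);
	m->running = true;
	rte_spinlock_unlock(&m->lock);
}

static void
mgmt_evq_stop_sketch(struct mgmt_evq_sketch2 *m)
{
	rte_spinlock_lock(&m->lock);
	m->running = false;		/* later pollers will skip the queue */
	rte_spinlock_unlock(&m->lock);

	/* No poller touches the queue now; stop it outside the lock. */
	/* ... stop the hardware event queue ... */
}
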
 
@@ -854,7 +863,7 @@ static int
 sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
                               const char *value_str, void *opaque)
 {
-       uint64_t *value = opaque;
+       uint32_t *value = opaque;
 
        if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
                *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
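Note: the handler's local type must match the object its opaque argument points at; in the driver that is the adapter's 32-bit EVQ flags value, so a uint64_t local would store eight bytes through a four-byte object. A sketch of wiring such a handler up with rte_kvargs_process() (the key name and flag values below are stand-ins, not the driver's SFC_KVARG_* / EFX_EVQ_FLAGS_TYPE_* definitions):

#include <errno.h>
#include <stdint.h>
#include <strings.h>
#include <rte_common.h>
#include <rte_kvargs.h>

#define KVARG_PERF_PROFILE	"perf_profile"	/* hypothetical key */
#define FLAGS_TYPE_THROUGHPUT	0x0u
#define FLAGS_TYPE_LOW_LATENCY	0x1u

static int
perf_profile_handler_sketch(const char *key __rte_unused,
			    const char *value_str, void *opaque)
{
	/* Width must match the object 'opaque' points at */
	uint32_t *value = opaque;

	if (strcasecmp(value_str, "throughput") == 0)
		*value = FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, "low-latency") == 0)
		*value = FLAGS_TYPE_LOW_LATENCY;
	else
		return -EINVAL;

	return 0;
}

static int
parse_perf_profile_sketch(const char *devargs, uint32_t *evq_flags)
{
	struct rte_kvargs *kvlist = rte_kvargs_parse(devargs, NULL);
	int rc;

	if (kvlist == NULL)
		return -EINVAL;

	rc = rte_kvargs_process(kvlist, KVARG_PERF_PROFILE,
				perf_profile_handler_sketch, evq_flags);
	rte_kvargs_free(kvlist);
	return rc;
}
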
@@ -888,7 +897,7 @@ sfc_ev_attach(struct sfc_adapter *sa)
        sa->mgmt_evq_index = 0;
        rte_spinlock_init(&sa->mgmt_evq_lock);
 
-       rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
+       rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries,
                          sa->socket_id, &sa->mgmt_evq);
        if (rc != 0)
                goto fail_mgmt_evq_init;
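Note: the management EVQ is now sized with sa->evq_min_entries instead of the fixed SFC_MGMT_EVQ_ENTRIES define; per the patch title, the intent is to take EVQ descriptor limits from the NIC configuration reported by libefx at attach time. A sketch of capturing such limits (the NIC-config field names below are assumptions, not taken from efx.h; the adapter fields match the diff):

#include <stdint.h>

struct nic_cfg_sketch {
	unsigned int enc_evq_min_nevs;	/* assumed field name */
	unsigned int enc_evq_max_nevs;	/* assumed field name */
};

struct adapter_sketch {
	unsigned int evq_min_entries;
	unsigned int evq_max_entries;
};

static void
capture_evq_limits_sketch(struct adapter_sketch *sa,
			  const struct nic_cfg_sketch *encp)
{
	/* Record the NIC-reported EVQ size limits once at attach time. */
	sa->evq_min_entries = encp->enc_evq_min_nevs;
	sa->evq_max_entries = encp->enc_evq_max_nevs;
	/* The management EVQ (see the hunk above) is then created with
	 * sa->evq_min_entries, the smallest size the NIC supports. */
}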