net/sfc: implement Rx queue start and stop operations
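These event-queue changes are the plumbing that lets Rx queues be started and stopped at run time; at the ethdev level such per-queue control is exercised through rte_eth_dev_rx_queue_start()/rte_eth_dev_rx_queue_stop(). A minimal application-side sketch under that assumption (the port/queue ids and helper name are illustrative, not part of this patch):

#include <rte_ethdev.h>

/* Illustrative only: stop and restart one Rx queue of an already
 * configured and started port; ids are placeholders. */
static int
example_restart_rxq(void)
{
        const uint16_t port_id = 0;
        const uint16_t queue_id = 0;
        int rc;

        rc = rte_eth_dev_rx_queue_stop(port_id, queue_id);
        if (rc != 0)
                return rc;

        /* ... application-level drain or reconfiguration ... */

        return rte_eth_dev_rx_queue_start(port_id, queue_id);
}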
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 42009c0..2bcc2be 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -29,6 +29,7 @@
 
 #include <rte_debug.h>
 #include <rte_cycles.h>
+#include <rte_alarm.h>
 
 #include "efx.h"
 
@@ -36,6 +37,7 @@
 #include "sfc_debug.h"
 #include "sfc_log.h"
 #include "sfc_ev.h"
+#include "sfc_rx.h"
 
 
 /* Initial delay when waiting for event queue init complete event */
@@ -45,6 +47,9 @@
 /* Event queue init approx timeout */
 #define SFC_EVQ_INIT_TIMEOUT_US                (2 * US_PER_S)
 
+/* Management event queue polling period in microseconds */
+#define SFC_MGMT_EV_QPOLL_PERIOD_US    (US_PER_S)
+
 
 static boolean_t
 sfc_ev_initialized(void *arg)
@@ -110,20 +115,30 @@ static boolean_t
 sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
 {
        struct sfc_evq *evq = arg;
+       struct sfc_rxq *rxq;
 
-       sfc_err(evq->sa, "EVQ %u unexpected Rx flush done event",
-               evq->evq_index);
-       return B_TRUE;
+       rxq = evq->rxq;
+       SFC_ASSERT(rxq != NULL);
+       SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+       SFC_ASSERT(rxq->evq == evq);
+       sfc_rx_qflush_done(rxq);
+
+       return B_FALSE;
 }
 
 static boolean_t
 sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
 {
        struct sfc_evq *evq = arg;
+       struct sfc_rxq *rxq;
 
-       sfc_err(evq->sa, "EVQ %u unexpected Rx flush failed event",
-               evq->evq_index);
-       return B_TRUE;
+       rxq = evq->rxq;
+       SFC_ASSERT(rxq != NULL);
+       SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+       SFC_ASSERT(rxq->evq == evq);
+       sfc_rx_qflush_failed(rxq);
+
+       return B_FALSE;
 }
 
 static boolean_t
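The flush-done and flush-failed events are now forwarded to the Rx queue via sfc_rx_qflush_done()/sfc_rx_qflush_failed() instead of being reported as unexpected. A hypothetical sketch of the queue-stop side that would consume this notification (the state names, retry count and delay are assumptions, not the driver's actual code):

/* Hypothetical sketch, assuming the driver's internal headers.
 * SFC_RXQ_FLUSHED/SFC_RXQ_FLUSH_FAILED, the retry count and the delay
 * are illustrative assumptions only. */
static int
example_rxq_flush_wait(struct sfc_rxq *rxq)
{
        unsigned int retries;

        /* ... a flush request would be issued through libefx here ... */

        for (retries = 0; retries < 10; ++retries) {
                /* Flush events arrive on the queue's own EVQ and are
                 * routed to sfc_rx_qflush_done()/_failed() above. */
                sfc_ev_qpoll(rxq->evq);

                if (rxq->state == SFC_RXQ_FLUSHED)      /* assumed flag */
                        return 0;
                if (rxq->state == SFC_RXQ_FLUSH_FAILED) /* assumed flag */
                        return EIO;

                rte_delay_ms(1);
        }

        return ETIMEDOUT;
}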
@@ -177,13 +192,19 @@ sfc_ev_timer(void *arg, uint32_t index)
 }
 
 static boolean_t
-sfc_ev_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
+sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
 {
        struct sfc_evq *evq = arg;
+       struct sfc_adapter *sa = evq->sa;
+       struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+       struct rte_eth_link new_link;
 
-       sfc_err(evq->sa, "EVQ %u unexpected link change",
-               evq->evq_index);
-       return B_TRUE;
+       EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+
+       sfc_port_link_mode_to_info(link_mode, &new_link);
+       rte_atomic64_set((rte_atomic64_t *)dev_link, *(uint64_t *)&new_link);
+
+       return B_FALSE;
 }
 
 static const efx_ev_callbacks_t sfc_ev_callbacks = {
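The link change handler publishes the new link status into dev->data->dev_link with a single 64-bit atomic store; the static assert guarantees that struct rte_eth_link fits in 64 bits, so readers can never observe a torn update. A sketch of the matching reader side under the same assumption (the helper name is illustrative; the driver's actual link_update callback is outside this file):

#include <rte_atomic.h>
#include <rte_ethdev.h>

/* Illustrative reader side: one atomic 64-bit load mirrors the atomic
 * 64-bit store above, so a torn struct rte_eth_link is never seen. */
static void
example_read_link(struct rte_eth_dev *dev, struct rte_eth_link *link)
{
        rte_atomic64_t *dev_link = (rte_atomic64_t *)&dev->data->dev_link;
        union {
                int64_t raw;
                struct rte_eth_link link;
        } u;

        u.raw = rte_atomic64_read(dev_link);
        *link = u.link;
}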
@@ -215,6 +236,19 @@ sfc_ev_qpoll(struct sfc_evq *evq)
        /* Poll-mode driver does not re-prime the event queue for interrupts */
 }
 
+void
+sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
+{
+       if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
+               struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
+
+               if (mgmt_evq->init_state == SFC_EVQ_STARTED)
+                       sfc_ev_qpoll(mgmt_evq);
+
+               rte_spinlock_unlock(&sa->mgmt_evq_lock);
+       }
+}
+
 int
 sfc_ev_qprime(struct sfc_evq *evq)
 {
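sfc_ev_mgmt_qpoll() takes the management EVQ lock with a trylock, so callers never block if the periodic alarm introduced below is already servicing the queue; a skipped poll is harmless because the next attempt picks the events up. A hypothetical caller that keeps the management EVQ serviced while waiting for an adapter-level condition (the condition callback and timeout are illustrative, not part of the driver):

#include <errno.h>
#include <rte_cycles.h>

/* Hypothetical wait loop assuming the driver's internal headers.  The
 * condition callback and the one-second timeout are illustrative. */
static int
example_wait_adapter_event(struct sfc_adapter *sa,
                           int (*done)(struct sfc_adapter *sa))
{
        unsigned int ms;

        for (ms = 0; ms < 1000; ++ms) {
                sfc_ev_mgmt_qpoll(sa);  /* trylock inside, never blocks */

                if (done(sa))
                        return 0;

                rte_delay_ms(1);
        }

        return ETIMEDOUT;
}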
@@ -315,6 +349,34 @@ sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
        efx_ev_qdestroy(evq->common);
 }
 
+static void
+sfc_ev_mgmt_periodic_qpoll(void *arg)
+{
+       struct sfc_adapter *sa = arg;
+       int rc;
+
+       sfc_ev_mgmt_qpoll(sa);
+
+       rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
+                              sfc_ev_mgmt_periodic_qpoll, sa);
+       if (rc != 0)
+               sfc_panic(sa,
+                         "cannot rearm management EVQ polling alarm (rc=%d)",
+                         rc);
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
+{
+       sfc_ev_mgmt_periodic_qpoll(sa);
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
+{
+       rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
+}
+
 int
 sfc_ev_start(struct sfc_adapter *sa)
 {
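EAL alarms are one-shot, which is why sfc_ev_mgmt_periodic_qpoll() re-arms itself on every invocation, why "start" is simply a first direct call, and why "stop" only needs to cancel the pending instance. The same pattern in isolation (the period, callback body and error handling are illustrative):

#include <rte_alarm.h>

#define EXAMPLE_PERIOD_US       100000  /* 100 ms, illustrative */

/* rte_eal_alarm_set() schedules a single callback, so periodic work
 * has to re-arm itself; error handling is elided in this sketch. */
static void
example_periodic(void *arg)
{
        /* ... periodic work on 'arg' ... */

        (void)rte_eal_alarm_set(EXAMPLE_PERIOD_US, example_periodic, arg);
}

static void
example_periodic_stop(void *arg)
{
        /* Remove any pending alarm matching (callback, argument). */
        rte_eal_alarm_cancel(example_periodic, arg);
}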
@@ -326,13 +388,34 @@ sfc_ev_start(struct sfc_adapter *sa)
        if (rc != 0)
                goto fail_ev_init;
 
+       /* Start management EVQ used for global events */
+       rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+       rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
+       if (rc != 0)
+               goto fail_mgmt_evq_start;
+
+       rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+       /*
+        * Start management EVQ polling. If interrupts are disabled
+        * (not used), this is required to process link status changes
+        * and other device-level events to avoid an unrecoverable
+        * error due to event queue overflow.
+        */
+       sfc_ev_mgmt_periodic_qpoll_start(sa);
+
        /*
-        * Rx/Tx event queues are started/stopped when corresponding queue
-        * is started/stopped.
+        * Rx/Tx event queues are started/stopped when the corresponding
+        * Rx/Tx queue is started/stopped.
         */
 
        return 0;
 
+fail_mgmt_evq_start:
+       rte_spinlock_unlock(&sa->mgmt_evq_lock);
+       efx_ev_fini(sa->nic);
+
 fail_ev_init:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
@@ -345,10 +428,20 @@ sfc_ev_stop(struct sfc_adapter *sa)
 
        sfc_log_init(sa, "entry");
 
+       sfc_ev_mgmt_periodic_qpoll_stop(sa);
+
        /* Make sure that all event queues are stopped */
        sw_index = sa->evq_count;
-       while (sw_index-- > 0)
-               sfc_ev_qstop(sa, sw_index);
+       while (sw_index-- > 0) {
+               if (sw_index == sa->mgmt_evq_index) {
+                       /* Locks are required for the management EVQ */
+                       rte_spinlock_lock(&sa->mgmt_evq_lock);
+                       sfc_ev_qstop(sa, sa->mgmt_evq_index);
+                       rte_spinlock_unlock(&sa->mgmt_evq_lock);
+               } else {
+                       sfc_ev_qstop(sa, sw_index);
+               }
+       }
 
        efx_ev_fini(sa->nic);
 }
@@ -442,6 +535,7 @@ sfc_ev_init(struct sfc_adapter *sa)
 
        sa->evq_count = sfc_ev_qcount(sa);
        sa->mgmt_evq_index = 0;
+       rte_spinlock_init(&sa->mgmt_evq_lock);
 
        /* Allocate EVQ info array */
        rc = ENOMEM;
@@ -457,6 +551,11 @@ sfc_ev_init(struct sfc_adapter *sa)
                        goto fail_ev_qinit_info;
        }
 
+       rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
+                         sa->socket_id);
+       if (rc != 0)
+               goto fail_mgmt_evq_init;
+
        /*
         * Rx/Tx event queues are created/destroyed when corresponding
         * Rx/Tx queue is created/destroyed.
@@ -464,6 +563,7 @@ sfc_ev_init(struct sfc_adapter *sa)
 
        return 0;
 
+fail_mgmt_evq_init:
 fail_ev_qinit_info:
        while (sw_index-- > 0)
                sfc_ev_qfini_info(sa, sw_index);