net/sfc: support multiple device probe
[dpdk.git] / drivers / net / sfc / sfc_repr.c
index fa53a72..4dbb6bd 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <stdint.h>
 
+#include <rte_mbuf.h>
 #include <rte_ethdev.h>
 #include <rte_malloc.h>
 #include <ethdev_driver.h>
@@ -21,6 +22,7 @@
 #include "sfc_ethdev_state.h"
 #include "sfc_repr_proxy_api.h"
 #include "sfc_switch.h"
+#include "sfc_dp_tx.h"
 
 /** Multi-process shared representor private data */
 struct sfc_repr_shared {
@@ -30,6 +32,17 @@ struct sfc_repr_shared {
        uint16_t                switch_port_id;
 };
 
+/* Per-queue Rx state; the ring is shared with the representor proxy */
+struct sfc_repr_rxq {
+       /* Datapath members */
+       struct rte_ring                 *ring;
+};
+
+/*
+ * Per-queue Tx state; egress_mport is filled in by the representor
+ * proxy when the TxQ is registered (see sfc_repr_proxy_add_txq())
+ */
+struct sfc_repr_txq {
+       /* Datapath members */
+       struct rte_ring                 *ring;
+       efx_mport_id_t                  egress_mport;
+};
+
 /** Primary process representor private data */
 struct sfc_repr {
        /**
@@ -50,6 +63,14 @@ struct sfc_repr {
                SFC_GENERIC_LOG(ERR, __VA_ARGS__);                      \
        } while (0)
 
+/*
+ * Warning-level counterpart of sfcr_err()/sfcr_info(): 'sr' is evaluated
+ * once and type-checked, but otherwise unused for now.
+ */
+#define sfcr_warn(sr, ...) \
+       do {                                                            \
+               const struct sfc_repr *_sr = (sr);                      \
+                                                                       \
+               (void)_sr;                                              \
+               SFC_GENERIC_LOG(WARNING, __VA_ARGS__);                  \
+       } while (0)
+
 #define sfcr_info(sr, ...) \
        do {                                                            \
                const struct sfc_repr *_sr = (sr);                      \
@@ -117,6 +138,179 @@ sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
        /* Just for symmetry of the API */
 }
 
+/*
+ * Drain an Rx queue by resetting its ring.  A NULL queue is tolerated so
+ * the caller may iterate over all queue slots unconditionally.
+ * NOTE(review): entries left in the ring are discarded without freeing
+ * the mbufs they reference — confirm the proxy drains the ring on stop.
+ */
+static void
+sfc_repr_rx_queue_stop(void *queue)
+{
+       struct sfc_repr_rxq *rxq = queue;
+
+       if (rxq == NULL)
+               return;
+
+       rte_ring_reset(rxq->ring);
+}
+
+/*
+ * Drain a Tx queue by resetting its ring.  A NULL queue is tolerated so
+ * the caller may iterate over all queue slots unconditionally.
+ * NOTE(review): entries left in the ring are discarded without freeing
+ * the mbufs they reference — confirm the proxy drains the ring on stop.
+ */
+static void
+sfc_repr_tx_queue_stop(void *queue)
+{
+       struct sfc_repr_txq *txq = queue;
+
+       if (txq == NULL)
+               return;
+
+       rte_ring_reset(txq->ring);
+}
+
+/*
+ * Start the representor: transition CONFIGURED -> STARTING -> STARTED and
+ * ask the representor proxy to start forwarding for this representor.
+ * Caller must hold the representor lock.  Returns 0 or a negative errno.
+ */
+static int
+sfc_repr_start(struct rte_eth_dev *dev)
+{
+       struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+       struct sfc_repr_shared *srs;
+       int ret;
+
+       sfcr_info(sr, "entry");
+
+       SFC_ASSERT(sfc_repr_lock_is_locked(sr));
+
+       switch (sr->state) {
+       case SFC_ETHDEV_CONFIGURED:
+               break;
+       case SFC_ETHDEV_STARTED:
+               /* Idempotent: starting an already started representor is OK */
+               sfcr_info(sr, "already started");
+               return 0;
+       default:
+               ret = -EINVAL;
+               goto fail_bad_state;
+       }
+
+       sr->state = SFC_ETHDEV_STARTING;
+
+       srs = sfc_repr_shared_by_eth_dev(dev);
+       ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);
+       if (ret != 0) {
+               /* Proxy APIs return a positive errno; convert to negative */
+               SFC_ASSERT(ret > 0);
+               ret = -ret;
+               goto fail_start;
+       }
+
+       sr->state = SFC_ETHDEV_STARTED;
+
+       sfcr_info(sr, "done");
+
+       return 0;
+
+fail_start:
+       /* Roll back to CONFIGURED so a later start attempt may succeed */
+       sr->state = SFC_ETHDEV_CONFIGURED;
+
+fail_bad_state:
+       sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+       return ret;
+}
+
+/*
+ * dev_start callback: serialize against other control-path operations
+ * via the representor lock and delegate to sfc_repr_start().
+ */
+static int
+sfc_repr_dev_start(struct rte_eth_dev *dev)
+{
+       struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+       int ret;
+
+       sfcr_info(sr, "entry");
+
+       sfc_repr_lock(sr);
+       ret = sfc_repr_start(dev);
+       sfc_repr_unlock(sr);
+
+       if (ret != 0)
+               goto fail_start;
+
+       sfcr_info(sr, "done");
+
+       return 0;
+
+fail_start:
+       sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+       return ret;
+}
+
+/*
+ * Stop the representor: ask the proxy to stop forwarding, drain all
+ * per-queue rings and transition back to CONFIGURED.
+ * Caller must hold the representor lock.  Returns 0 or a negative errno.
+ */
+static int
+sfc_repr_stop(struct rte_eth_dev *dev)
+{
+       struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+       struct sfc_repr_shared *srs;
+       unsigned int i;
+       int ret;
+
+       sfcr_info(sr, "entry");
+
+       SFC_ASSERT(sfc_repr_lock_is_locked(sr));
+
+       switch (sr->state) {
+       case SFC_ETHDEV_STARTED:
+               break;
+       case SFC_ETHDEV_CONFIGURED:
+               /* Idempotent: stopping an already stopped representor is OK */
+               sfcr_info(sr, "already stopped");
+               return 0;
+       default:
+               sfcr_err(sr, "stop in unexpected state %u", sr->state);
+               SFC_ASSERT(B_FALSE);
+               ret = -EINVAL;
+               goto fail_bad_state;
+       }
+
+       srs = sfc_repr_shared_by_eth_dev(dev);
+       ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);
+       if (ret != 0) {
+               /* Proxy APIs return a positive errno; convert to negative */
+               SFC_ASSERT(ret > 0);
+               ret = -ret;
+               goto fail_stop;
+       }
+
+       /* Reset the rings only after the proxy has stopped using them */
+       for (i = 0; i < dev->data->nb_rx_queues; i++)
+               sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);
+
+       sr->state = SFC_ETHDEV_CONFIGURED;
+       sfcr_info(sr, "done");
+
+       return 0;
+
+fail_bad_state:
+fail_stop:
+       sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+
+       return ret;
+}
+
+/*
+ * dev_stop callback: serialize against other control-path operations
+ * via the representor lock and delegate to sfc_repr_stop().
+ */
+static int
+sfc_repr_dev_stop(struct rte_eth_dev *dev)
+{
+       struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+       int ret;
+
+       sfcr_info(sr, "entry");
+
+       sfc_repr_lock(sr);
+
+       ret = sfc_repr_stop(dev);
+       if (ret != 0) {
+               sfcr_err(sr, "%s() failed to stop representor", __func__);
+               goto fail_stop;
+       }
+
+       sfc_repr_unlock(sr);
+
+       sfcr_info(sr, "done");
+
+       return 0;
+
+fail_stop:
+       sfc_repr_unlock(sr);
+
+       /* "failed: %s" matches every other error path in this file */
+       sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+
+       return ret;
+}
+
 static int
 sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
                    const struct rte_eth_conf *conf)
@@ -269,6 +463,247 @@ sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
        return 0;
 }
 
+/*
+ * link_update callback.  While started, report link UP with unknown
+ * speed (the representor link state is synthetic — presumably there is
+ * no physical link to query here); otherwise report the link mode as
+ * unknown.
+ */
+static int
+sfc_repr_dev_link_update(struct rte_eth_dev *dev,
+                        __rte_unused int wait_to_complete)
+{
+       struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+       struct rte_eth_link link;
+
+       if (sr->state != SFC_ETHDEV_STARTED) {
+               sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
+       } else {
+               memset(&link, 0, sizeof(link));
+               link.link_status = ETH_LINK_UP;
+               link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+       }
+
+       return rte_eth_linkstatus_set(dev, &link);
+}
+
+/*
+ * Create the ring named "sfc_<pf>_repr_<id>_<type>q<qid>" used to pass
+ * mbufs between the representor and the representor proxy.
+ * Returns 0, -ENAMETOOLONG if the name would be truncated, or the
+ * negated rte_errno from ring creation.
+ */
+static int
+sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
+                    const char *type_name, uint16_t qid, uint16_t nb_desc,
+                    unsigned int socket_id, struct rte_ring **ring)
+{
+       char ring_name[RTE_RING_NAMESIZE];
+       int ret;
+
+       ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
+                      pf_port_id, repr_id, type_name, qid);
+       /* NOTE(review): a negative snprintf() return is not handled; it
+        * cannot occur for this fixed format — confirm */
+       if (ret >= (int)sizeof(ring_name))
+               return -ENAMETOOLONG;
+
+       /*
+        * Single producer/consumer rings are used since the API for Tx/Rx
+        * packet burst for representors are guaranteed to be called from
+        * a single thread, and the user of the other end (representor proxy)
+        * is also single-threaded.
+        */
+       *ring = rte_ring_create(ring_name, nb_desc, socket_id,
+                              RING_F_SP_ENQ | RING_F_SC_DEQ);
+       if (*ring == NULL)
+               return -rte_errno;
+
+       return 0;
+}
+
+/*
+ * Validate an Rx queue configuration.  Unsupported threshold/drop
+ * settings only produce warnings; deferred start is the sole hard
+ * failure (-EINVAL).
+ */
+static int
+sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
+                       const struct rte_eth_rxconf *rx_conf)
+{
+       int ret = 0;
+
+       sfcr_info(sr, "entry");
+
+       if (rx_conf->rx_thresh.pthresh != 0 ||
+           rx_conf->rx_thresh.hthresh != 0 ||
+           rx_conf->rx_thresh.wthresh != 0) {
+               sfcr_warn(sr,
+                       "RxQ prefetch/host/writeback thresholds are not supported");
+       }
+
+       if (rx_conf->rx_free_thresh != 0)
+               sfcr_warn(sr, "RxQ free threshold is not supported");
+
+       if (rx_conf->rx_drop_en == 0)
+               sfcr_warn(sr, "RxQ drop disable is not supported");
+
+       if (rx_conf->rx_deferred_start) {
+               sfcr_err(sr, "Deferred start is not supported");
+               ret = -EINVAL;
+       }
+
+       sfcr_info(sr, "done: %s", rte_strerror(-ret));
+
+       return ret;
+}
+
+/*
+ * rx_queue_setup callback: validate the configuration, allocate the
+ * queue, create the SP/SC ring shared with the representor proxy and
+ * register the ring and mbuf pool with the proxy.
+ */
+static int
+sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+                       uint16_t nb_rx_desc, unsigned int socket_id,
+                       const struct rte_eth_rxconf *rx_conf,
+                       struct rte_mempool *mb_pool)
+{
+       struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
+       struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+       struct sfc_repr_rxq *rxq;
+       int ret;
+
+       sfcr_info(sr, "entry");
+
+       ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
+       if (ret != 0)
+               goto fail_check_conf;
+
+       ret = -ENOMEM;
+       rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (rxq == NULL) {
+               sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
+               goto fail_rxq_alloc;
+       }
+
+       ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
+                                  "rx", rx_queue_id, nb_rx_desc,
+                                  socket_id, &rxq->ring);
+       if (ret != 0) {
+               sfcr_err(sr, "%s() failed to create ring", __func__);
+               goto fail_ring_create;
+       }
+
+       /* Proxy APIs return a positive errno; convert to negative */
+       ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
+                                    rx_queue_id, rxq->ring, mb_pool);
+       if (ret != 0) {
+               SFC_ASSERT(ret > 0);
+               ret = -ret;
+               sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
+               goto fail_proxy_add_rxq;
+       }
+
+       dev->data->rx_queues[rx_queue_id] = rxq;
+
+       sfcr_info(sr, "done");
+
+       return 0;
+
+fail_proxy_add_rxq:
+       rte_ring_free(rxq->ring);
+
+fail_ring_create:
+       rte_free(rxq);
+
+fail_rxq_alloc:
+fail_check_conf:
+       sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+       return ret;
+}
+
+/*
+ * rx_queue_release callback: unregister the queue from the proxy and
+ * free its ring and state.  Safe for a queue slot that was never set
+ * up (dev_close iterates over all slots unconditionally).
+ */
+static void
+sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
+       struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];
+
+       /* Nothing was allocated or registered for an unset queue */
+       if (rxq == NULL)
+               return;
+
+       sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
+       rte_ring_free(rxq->ring);
+       rte_free(rxq);
+}
+
+/*
+ * Validate a Tx queue configuration.  Unsupported threshold settings
+ * only produce warnings; deferred start is the sole hard failure
+ * (-EINVAL).  Mirrors sfc_repr_rx_qcheck_conf().
+ */
+static int
+sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
+                       const struct rte_eth_txconf *tx_conf)
+{
+       int ret = 0;
+
+       sfcr_info(sr, "entry");
+
+       if (tx_conf->tx_rs_thresh != 0)
+               sfcr_warn(sr, "RS bit in transmit descriptor is not supported");
+
+       if (tx_conf->tx_free_thresh != 0)
+               sfcr_warn(sr, "TxQ free threshold is not supported");
+
+       if (tx_conf->tx_thresh.pthresh != 0 ||
+           tx_conf->tx_thresh.hthresh != 0 ||
+           tx_conf->tx_thresh.wthresh != 0) {
+               sfcr_warn(sr,
+                       "prefetch/host/writeback thresholds are not supported");
+       }
+
+       if (tx_conf->tx_deferred_start) {
+               sfcr_err(sr, "Deferred start is not supported");
+               ret = -EINVAL;
+       }
+
+       sfcr_info(sr, "done: %s", rte_strerror(-ret));
+
+       return ret;
+}
+
+/*
+ * tx_queue_setup callback: validate the configuration, allocate the
+ * queue, create the SP/SC ring shared with the representor proxy and
+ * register the ring with the proxy (which fills in egress_mport).
+ * Error handling mirrors sfc_repr_rx_queue_setup().
+ */
+static int
+sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+                       uint16_t nb_tx_desc, unsigned int socket_id,
+                       const struct rte_eth_txconf *tx_conf)
+{
+       struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
+       struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+       struct sfc_repr_txq *txq;
+       int ret;
+
+       sfcr_info(sr, "entry");
+
+       ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
+       if (ret != 0)
+               goto fail_check_conf;
+
+       ret = -ENOMEM;
+       txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq == NULL) {
+               sfcr_err(sr, "%s() failed to alloc TxQ", __func__);
+               goto fail_txq_alloc;
+       }
+
+       ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
+                                  "tx", tx_queue_id, nb_tx_desc,
+                                  socket_id, &txq->ring);
+       if (ret != 0) {
+               sfcr_err(sr, "%s() failed to create ring", __func__);
+               goto fail_ring_create;
+       }
+
+       /* Proxy APIs return a positive errno; convert to negative */
+       ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
+                                    tx_queue_id, txq->ring,
+                                    &txq->egress_mport);
+       if (ret != 0) {
+               SFC_ASSERT(ret > 0);
+               ret = -ret;
+               sfcr_err(sr, "%s() failed to add proxy TxQ", __func__);
+               goto fail_proxy_add_txq;
+       }
+
+       dev->data->tx_queues[tx_queue_id] = txq;
+
+       sfcr_info(sr, "done");
+
+       return 0;
+
+fail_proxy_add_txq:
+       rte_ring_free(txq->ring);
+
+fail_ring_create:
+       rte_free(txq);
+
+fail_txq_alloc:
+fail_check_conf:
+       sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+       return ret;
+}
+
+/*
+ * tx_queue_release callback: unregister the queue from the proxy and
+ * free its ring and state.  Safe for a queue slot that was never set
+ * up (dev_close iterates over all slots unconditionally).
+ */
+static void
+sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
+       struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];
+
+       /* Nothing was allocated or registered for an unset queue */
+       if (txq == NULL)
+               return;
+
+       sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
+       rte_ring_free(txq->ring);
+       rte_free(txq);
+}
+
 static void
 sfc_repr_close(struct sfc_repr *sr)
 {
@@ -287,11 +722,16 @@ sfc_repr_dev_close(struct rte_eth_dev *dev)
 {
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
+       unsigned int i;
 
        sfcr_info(sr, "entry");
 
        sfc_repr_lock(sr);
        switch (sr->state) {
+       case SFC_ETHDEV_STARTED:
+               sfc_repr_stop(dev);
+               SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
+               /* FALLTHROUGH */
        case SFC_ETHDEV_CONFIGURED:
                sfc_repr_close(sr);
                SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
@@ -303,6 +743,16 @@ sfc_repr_dev_close(struct rte_eth_dev *dev)
                break;
        }
 
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               sfc_repr_rx_queue_release(dev, i);
+               dev->data->rx_queues[i] = NULL;
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               sfc_repr_tx_queue_release(dev, i);
+               dev->data->tx_queues[i] = NULL;
+       }
+
        /*
         * Cleanup all resources.
         * Rollback primary process sfc_repr_eth_dev_init() below.
@@ -324,8 +774,15 @@ sfc_repr_dev_close(struct rte_eth_dev *dev)
 
+/* Ethdev control-path operations implemented by port representors */
 static const struct eth_dev_ops sfc_repr_dev_ops = {
        .dev_configure                  = sfc_repr_dev_configure,
+       .dev_start                      = sfc_repr_dev_start,
+       .dev_stop                       = sfc_repr_dev_stop,
        .dev_close                      = sfc_repr_dev_close,
        .dev_infos_get                  = sfc_repr_dev_infos_get,
+       .link_update                    = sfc_repr_dev_link_update,
+       .rx_queue_setup                 = sfc_repr_rx_queue_setup,
+       .rx_queue_release               = sfc_repr_rx_queue_release,
+       .tx_queue_setup                 = sfc_repr_tx_queue_setup,
+       .tx_queue_release               = sfc_repr_tx_queue_release,
 };
 
 
@@ -451,6 +908,7 @@ sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
        struct sfc_repr_init_data repr_data;
        char name[RTE_ETH_NAME_MAX_LEN];
        int ret;
+       struct rte_eth_dev *dev;
 
        if (snprintf(name, sizeof(name), "net_%s_representor_%u",
                     parent->device->name, representor_id) >=
@@ -459,20 +917,24 @@ sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
                return -ENAMETOOLONG;
        }
 
-       memset(&repr_data, 0, sizeof(repr_data));
-       repr_data.pf_port_id = parent->data->port_id;
-       repr_data.repr_id = representor_id;
-       repr_data.switch_domain_id = switch_domain_id;
-       repr_data.mport_sel = *mport_sel;
-
-       ret = rte_eth_dev_create(parent->device, name,
-                                 sizeof(struct sfc_repr_shared),
-                                 NULL, NULL,
-                                 sfc_repr_eth_dev_init, &repr_data);
-       if (ret != 0)
-               SFC_GENERIC_LOG(ERR, "%s() failed to create device", __func__);
-
-       SFC_GENERIC_LOG(INFO, "%s() done: %s", __func__, rte_strerror(-ret));
+       dev = rte_eth_dev_allocated(name);
+       if (dev == NULL) {
+               memset(&repr_data, 0, sizeof(repr_data));
+               repr_data.pf_port_id = parent->data->port_id;
+               repr_data.repr_id = representor_id;
+               repr_data.switch_domain_id = switch_domain_id;
+               repr_data.mport_sel = *mport_sel;
+
+               ret = rte_eth_dev_create(parent->device, name,
+                                        sizeof(struct sfc_repr_shared),
+                                        NULL, NULL,
+                                        sfc_repr_eth_dev_init, &repr_data);
+               if (ret != 0) {
+                       SFC_GENERIC_LOG(ERR, "%s() failed to create device",
+                                       __func__);
+                       return ret;
+               }
+       }
 
-       return ret;
+       return 0;
 }