#include <stdint.h>
+#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include "sfc_ethdev_state.h"
#include "sfc_repr_proxy_api.h"
#include "sfc_switch.h"
+#include "sfc_dp_tx.h"
/** Multi-process shared representor private data */
struct sfc_repr_shared {
	uint16_t		pf_port_id;
	uint16_t		repr_id;
	uint16_t		switch_domain_id;
	uint16_t		switch_port_id;
};

static inline void
sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
{
	/* Just for symmetry of the API */
}
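+/*
+ * Representor Rx/Tx queues are backed by rte_ring instances shared
+ * with the representor proxy; stopping a queue simply resets the ring.
+ */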
+static void
+sfc_repr_rx_queue_stop(void *queue)
+{
+ struct sfc_repr_rxq *rxq = queue;
+
+ if (rxq == NULL)
+ return;
+
+ rte_ring_reset(rxq->ring);
+}
+
+static void
+sfc_repr_tx_queue_stop(void *queue)
+{
+ struct sfc_repr_txq *txq = queue;
+
+ if (txq == NULL)
+ return;
+
+ rte_ring_reset(txq->ring);
+}
+
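+/*
+ * Rx burst simply dequeues packets that the representor proxy has
+ * already placed into this representor's Rx ring.
+ */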
+static uint16_t
+sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_repr_rxq *rxq = rx_queue;
+ void **objs = (void *)&rx_pkts[0];
+
+ /* The mbuf port field is already filled in by the representor proxy */
+ return rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);
+}
+
+static uint16_t
+sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_repr_txq *txq = tx_queue;
+ unsigned int n_tx;
+ void **objs;
+ uint16_t i;
+
+ /*
+ * The mbuf is likely cache-hot here, so set the override flag and the
+ * egress m-port in this context rather than in the representor proxy.
+ * This avoids a cache bounce and, potentially, allows one
+ * multi-producer single-consumer ring to be shared by all
+ * representors.
+ *
+ * The only drawback is that the work is repeated if the enqueue
+ * fails and the sender retries.
+ */
+ for (i = 0; i < nb_pkts; ++i) {
+ struct rte_mbuf *m = tx_pkts[i];
+
+ m->ol_flags |= sfc_dp_mport_override;
+ *RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset,
+ efx_mport_id_t *) = txq->egress_mport;
+ }
+
+ objs = (void *)&tx_pkts[0];
+ n_tx = rte_ring_sp_enqueue_burst(txq->ring, objs, nb_pkts, NULL);
+
+ /*
+ * Remove the m-port override flag from packets that were not enqueued.
+ * Setting the flag only after a successful enqueue is not an option,
+ * since ownership of enqueued packets is transferred to the
+ * representor proxy.
+ */
+ for (i = n_tx; i < nb_pkts; ++i) {
+ struct rte_mbuf *m = tx_pkts[i];
+
+ m->ol_flags &= ~sfc_dp_mport_override;
+ }
+
+ return n_tx;
+}
+
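+/* Start the representor datapath; the caller must hold the representor lock */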
+static int
+sfc_repr_start(struct rte_eth_dev *dev)
+{
+ struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+ struct sfc_repr_shared *srs;
+ int ret;
+
+ sfcr_info(sr, "entry");
+
+ SFC_ASSERT(sfc_repr_lock_is_locked(sr));
+
+ switch (sr->state) {
+ case SFC_ETHDEV_CONFIGURED:
+ break;
+ case SFC_ETHDEV_STARTED:
+ sfcr_info(sr, "already started");
+ return 0;
+ default:
+ ret = -EINVAL;
+ goto fail_bad_state;
+ }
+
+ sr->state = SFC_ETHDEV_STARTING;
+
+ srs = sfc_repr_shared_by_eth_dev(dev);
+ ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);
+ if (ret != 0) {
+ SFC_ASSERT(ret > 0);
+ ret = -ret;
+ goto fail_start;
+ }
+
+ sr->state = SFC_ETHDEV_STARTED;
+
+ sfcr_info(sr, "done");
+
+ return 0;
+
+fail_start:
+ sr->state = SFC_ETHDEV_CONFIGURED;
+
+fail_bad_state:
+ sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+ return ret;
+}
+
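+/* dev_start callback: takes the representor lock around sfc_repr_start() */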
+static int
+sfc_repr_dev_start(struct rte_eth_dev *dev)
+{
+ struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+ int ret;
+
+ sfcr_info(sr, "entry");
+
+ sfc_repr_lock(sr);
+ ret = sfc_repr_start(dev);
+ sfc_repr_unlock(sr);
+
+ if (ret != 0)
+ goto fail_start;
+
+ sfcr_info(sr, "done");
+
+ return 0;
+
+fail_start:
+ sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+ return ret;
+}
+
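+/* Stop the representor datapath; the caller must hold the representor lock */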
+static int
+sfc_repr_stop(struct rte_eth_dev *dev)
+{
+ struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+ struct sfc_repr_shared *srs;
+ unsigned int i;
+ int ret;
+
+ sfcr_info(sr, "entry");
+
+ SFC_ASSERT(sfc_repr_lock_is_locked(sr));
+
+ switch (sr->state) {
+ case SFC_ETHDEV_STARTED:
+ break;
+ case SFC_ETHDEV_CONFIGURED:
+ sfcr_info(sr, "already stopped");
+ return 0;
+ default:
+ sfcr_err(sr, "stop in unexpected state %u", sr->state);
+ SFC_ASSERT(B_FALSE);
+ ret = -EINVAL;
+ goto fail_bad_state;
+ }
+
+ srs = sfc_repr_shared_by_eth_dev(dev);
+ ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);
+ if (ret != 0) {
+ SFC_ASSERT(ret > 0);
+ ret = -ret;
+ goto fail_stop;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);
+
+ sr->state = SFC_ETHDEV_CONFIGURED;
+ sfcr_info(sr, "done");
+
+ return 0;
+
+fail_bad_state:
+fail_stop:
+ sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+
+ return ret;
+}
+
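+/* dev_stop callback: takes the representor lock around sfc_repr_stop() */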
+static int
+sfc_repr_dev_stop(struct rte_eth_dev *dev)
+{
+ struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+ int ret;
+
+ sfcr_info(sr, "entry");
+
+ sfc_repr_lock(sr);
+
+ ret = sfc_repr_stop(dev);
+ if (ret != 0) {
+ sfcr_err(sr, "%s() failed to stop representor", __func__);
+ goto fail_stop;
+ }
+
+ sfc_repr_unlock(sr);
+
+ sfcr_info(sr, "done");
+
+ return 0;
+
+fail_stop:
+ sfc_repr_unlock(sr);
+
+ sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
+
+ return ret;
+}
+
static int
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
const struct rte_eth_conf *conf)
return 0;
}
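+/*
+ * A representor has no physical link of its own: report the link as up
+ * with unknown speed while the representor is started and as unknown
+ * otherwise.
+ */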
+static int
+sfc_repr_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+ struct rte_eth_link link;
+
+ if (sr->state != SFC_ETHDEV_STARTED) {
+ sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
+ } else {
+ memset(&link, 0, sizeof(link));
+ link.link_status = ETH_LINK_UP;
+ link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
static int
sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
const char *type_name, uint16_t qid, uint16_t nb_desc,
sfc_repr_lock(sr);
switch (sr->state) {
+ case SFC_ETHDEV_STARTED:
+ sfc_repr_stop(dev);
+ SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
+ /* FALLTHROUGH */
case SFC_ETHDEV_CONFIGURED:
sfc_repr_close(sr);
SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
(void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
dev->dev_ops = NULL;
sfc_repr_unlock(sr);
static const struct eth_dev_ops sfc_repr_dev_ops = {
.dev_configure = sfc_repr_dev_configure,
+ .dev_start = sfc_repr_dev_start,
+ .dev_stop = sfc_repr_dev_stop,
.dev_close = sfc_repr_dev_close,
.dev_infos_get = sfc_repr_dev_infos_get,
+ .link_update = sfc_repr_dev_link_update,
.rx_queue_setup = sfc_repr_rx_queue_setup,
.rx_queue_release = sfc_repr_rx_queue_release,
.tx_queue_setup = sfc_repr_tx_queue_setup,
goto fail_mac_addrs;
}
+ dev->rx_pkt_burst = sfc_repr_rx_burst;
+ dev->tx_pkt_burst = sfc_repr_tx_burst;
dev->dev_ops = &sfc_repr_dev_ops;
sr->state = SFC_ETHDEV_INITIALIZED;
struct sfc_repr_init_data repr_data;
char name[RTE_ETH_NAME_MAX_LEN];
int ret;
+ struct rte_eth_dev *dev;
if (snprintf(name, sizeof(name), "net_%s_representor_%u",
    parent->device->name, representor_id) >=
    (int)sizeof(name)) {
	return -ENAMETOOLONG;
}
- memset(&repr_data, 0, sizeof(repr_data));
- repr_data.pf_port_id = parent->data->port_id;
- repr_data.repr_id = representor_id;
- repr_data.switch_domain_id = switch_domain_id;
- repr_data.mport_sel = *mport_sel;
-
- ret = rte_eth_dev_create(parent->device, name,
- sizeof(struct sfc_repr_shared),
- NULL, NULL,
- sfc_repr_eth_dev_init, &repr_data);
- if (ret != 0)
- SFC_GENERIC_LOG(ERR, "%s() failed to create device", __func__);
-
- SFC_GENERIC_LOG(INFO, "%s() done: %s", __func__, rte_strerror(-ret));
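+ /*
+ * The ethdev may already exist if the PF has been probed more than
+ * once; create a new representor device only if it does not.
+ */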
+ dev = rte_eth_dev_allocated(name);
+ if (dev == NULL) {
+ memset(&repr_data, 0, sizeof(repr_data));
+ repr_data.pf_port_id = parent->data->port_id;
+ repr_data.repr_id = representor_id;
+ repr_data.switch_domain_id = switch_domain_id;
+ repr_data.mport_sel = *mport_sel;
+
+ ret = rte_eth_dev_create(parent->device, name,
+ sizeof(struct sfc_repr_shared),
+ NULL, NULL,
+ sfc_repr_eth_dev_init, &repr_data);
+ if (ret != 0) {
+ SFC_GENERIC_LOG(ERR, "%s() failed to create device",
+ __func__);
+ return ret;
+ }
+ }
- return ret;
+ return 0;
}