#include "sfc_log.h"
#include "sfc_service.h"
#include "sfc_repr_proxy.h"
#include "sfc_repr_proxy_api.h"
#include "sfc.h"
/**
 * Amount of time to wait for the representor proxy routine (which is
 * running on a service core) to handle a request sent via mbox.
 */
#define SFC_REPR_PROXY_MBOX_POLL_TIMEOUT_MS 1000
+
+static struct sfc_repr_proxy *
+sfc_repr_proxy_by_adapter(struct sfc_adapter *sa)
+{
+ return &sa->repr_proxy;
+}
+
+static struct sfc_adapter *
+sfc_get_adapter_by_pf_port_id(uint16_t pf_port_id)
+{
+ struct rte_eth_dev *dev;
+ struct sfc_adapter *sa;
+
+ SFC_ASSERT(pf_port_id < RTE_MAX_ETHPORTS);
+
+ dev = &rte_eth_devices[pf_port_id];
+ sa = sfc_adapter_by_eth_dev(dev);
+
+ sfc_adapter_lock(sa);
+
+ return sa;
+}
+
/** Drop the adapter lock taken by sfc_get_adapter_by_pf_port_id(). */
static void
sfc_put_adapter(struct sfc_adapter *sa)
{
	sfc_adapter_unlock(sa);
}
+
+static int
+sfc_repr_proxy_mbox_send(struct sfc_repr_proxy_mbox *mbox,
+ struct sfc_repr_proxy_port *port,
+ enum sfc_repr_proxy_mbox_op op)
+{
+ const unsigned int wait_ms = SFC_REPR_PROXY_MBOX_POLL_TIMEOUT_MS;
+ unsigned int i;
+
+ mbox->op = op;
+ mbox->port = port;
+ mbox->ack = false;
+
+ /*
+ * Release ordering enforces marker set after data is populated.
+ * Paired with acquire ordering in sfc_repr_proxy_mbox_handle().
+ */
+ __atomic_store_n(&mbox->write_marker, true, __ATOMIC_RELEASE);
+
+ /*
+ * Wait for the representor routine to process the request.
+ * Give up on timeout.
+ */
+ for (i = 0; i < wait_ms; i++) {
+ /*
+ * Paired with release ordering in sfc_repr_proxy_mbox_handle()
+ * on acknowledge write.
+ */
+ if (__atomic_load_n(&mbox->ack, __ATOMIC_ACQUIRE))
+ break;
+
+ rte_delay_ms(1);
+ }
+
+ if (i == wait_ms) {
+ SFC_GENERIC_LOG(ERR,
+ "%s() failed to wait for representor proxy routine ack",
+ __func__);
+ return ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void
+sfc_repr_proxy_mbox_handle(struct sfc_repr_proxy *rp)
+{
+ struct sfc_repr_proxy_mbox *mbox = &rp->mbox;
+
+ /*
+ * Paired with release ordering in sfc_repr_proxy_mbox_send()
+ * on marker set.
+ */
+ if (!__atomic_load_n(&mbox->write_marker, __ATOMIC_ACQUIRE))
+ return;
+
+ mbox->write_marker = false;
+
+ switch (mbox->op) {
+ case SFC_REPR_PROXY_MBOX_ADD_PORT:
+ TAILQ_INSERT_TAIL(&rp->ports, mbox->port, entries);
+ break;
+ case SFC_REPR_PROXY_MBOX_DEL_PORT:
+ TAILQ_REMOVE(&rp->ports, mbox->port, entries);
+ break;
+ default:
+ SFC_ASSERT(0);
+ return;
+ }
+
+ /*
+ * Paired with acquire ordering in sfc_repr_proxy_mbox_send()
+ * on acknowledge read.
+ */
+ __atomic_store_n(&mbox->ack, true, __ATOMIC_RELEASE);
+}
+
/** Service routine run on a service core; drains the proxy mailbox. */
static int32_t
sfc_repr_proxy_routine(void *arg)
{
	struct sfc_repr_proxy *rp = arg;

	sfc_repr_proxy_mbox_handle(rp);

	return 0;
}
+
+static int
+sfc_repr_proxy_ports_init(struct sfc_adapter *sa)
+{
+ struct sfc_repr_proxy *rp = &sa->repr_proxy;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_mcdi_mport_alloc_alias(sa->nic, &rp->mport_alias, NULL);
+ if (rc != 0) {
+ sfc_err(sa, "failed to alloc mport alias: %s",
+ rte_strerror(rc));
+ goto fail_alloc_mport_alias;
+ }
+
+ TAILQ_INIT(&rp->ports);
+
+ sfc_log_init(sa, "done");
return 0;
+
+fail_alloc_mport_alias:
+
+ sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+ return rc;
+}
+
+void
+sfc_repr_proxy_pre_detach(struct sfc_adapter *sa)
+{
+ struct sfc_repr_proxy *rp = &sa->repr_proxy;
+ bool close_ports[RTE_MAX_ETHPORTS] = {0};
+ struct sfc_repr_proxy_port *port;
+ unsigned int i;
+
+ SFC_ASSERT(!sfc_adapter_is_locked(sa));
+
+ sfc_adapter_lock(sa);
+
+ if (sfc_repr_available(sfc_sa2shared(sa))) {
+ TAILQ_FOREACH(port, &rp->ports, entries)
+ close_ports[port->rte_port_id] = true;
+ } else {
+ sfc_log_init(sa, "representors not supported - skip");
+ }
+
+ sfc_adapter_unlock(sa);
+
+ for (i = 0; i < RTE_DIM(close_ports); i++) {
+ if (close_ports[i]) {
+ rte_eth_dev_stop(i);
+ rte_eth_dev_close(i);
+ }
+ }
+}
+
+static void
+sfc_repr_proxy_ports_fini(struct sfc_adapter *sa)
+{
+ struct sfc_repr_proxy *rp = &sa->repr_proxy;
+
+ efx_mae_mport_free(sa->nic, &rp->mport_alias);
}
int
return 0;
}
+ rc = sfc_repr_proxy_ports_init(sa);
+ if (rc != 0)
+ goto fail_ports_init;
+
cid = sfc_get_service_lcore(sa->socket_id);
if (cid == RTE_MAX_LCORE && sa->socket_id != SOCKET_ID_ANY) {
/* Warn and try to allocate on any NUMA node */
*/
fail_get_service_lcore:
+ sfc_repr_proxy_ports_fini(sa);
+
+fail_ports_init:
sfc_log_init(sa, "failed: %s", rte_strerror(rc));
return rc;
}
rte_service_map_lcore_set(rp->service_id, rp->service_core_id, 0);
rte_service_component_unregister(rp->service_id);
+ sfc_repr_proxy_ports_fini(sa);
sfc_log_init(sa, "done");
}
goto fail_runstate_set;
}
+ rp->started = true;
+
sfc_log_init(sa, "done");
return 0;
/* Service lcore may be shared and we never stop it */
+ rp->started = false;
+
+ sfc_log_init(sa, "done");
+}
+
+static struct sfc_repr_proxy_port *
+sfc_repr_proxy_find_port(struct sfc_repr_proxy *rp, uint16_t repr_id)
+{
+ struct sfc_repr_proxy_port *port;
+
+ TAILQ_FOREACH(port, &rp->ports, entries) {
+ if (port->repr_id == repr_id)
+ return port;
+ }
+
+ return NULL;
+}
+
+int
+sfc_repr_proxy_add_port(uint16_t pf_port_id, uint16_t repr_id,
+ uint16_t rte_port_id, const efx_mport_sel_t *mport_sel)
+{
+ struct sfc_repr_proxy_port *port;
+ struct sfc_repr_proxy *rp;
+ struct sfc_adapter *sa;
+ int rc;
+
+ sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ rp = sfc_repr_proxy_by_adapter(sa);
+
+ sfc_log_init(sa, "entry");
+ TAILQ_FOREACH(port, &rp->ports, entries) {
+ if (port->rte_port_id == rte_port_id) {
+ rc = EEXIST;
+ sfc_err(sa, "%s() failed: port exists", __func__);
+ goto fail_port_exists;
+ }
+ }
+
+ port = rte_zmalloc("sfc-repr-proxy-port", sizeof(*port),
+ sa->socket_id);
+ if (port == NULL) {
+ rc = ENOMEM;
+ sfc_err(sa, "failed to alloc memory for proxy port");
+ goto fail_alloc_port;
+ }
+
+ rc = efx_mae_mport_id_by_selector(sa->nic, mport_sel,
+ &port->egress_mport);
+ if (rc != 0) {
+ sfc_err(sa,
+ "failed get MAE mport id by selector (repr_id %u): %s",
+ repr_id, rte_strerror(rc));
+ goto fail_mport_id;
+ }
+
+ port->rte_port_id = rte_port_id;
+ port->repr_id = repr_id;
+
+ if (rp->started) {
+ rc = sfc_repr_proxy_mbox_send(&rp->mbox, port,
+ SFC_REPR_PROXY_MBOX_ADD_PORT);
+ if (rc != 0) {
+ sfc_err(sa, "failed to add proxy port %u",
+ port->repr_id);
+ goto fail_port_add;
+ }
+ } else {
+ TAILQ_INSERT_TAIL(&rp->ports, port, entries);
+ }
+
+ sfc_log_init(sa, "done");
+ sfc_put_adapter(sa);
+
+ return 0;
+
+fail_port_add:
+fail_mport_id:
+ rte_free(port);
+fail_alloc_port:
+fail_port_exists:
+ sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+ sfc_put_adapter(sa);
+
+ return rc;
+}
+
+int
+sfc_repr_proxy_del_port(uint16_t pf_port_id, uint16_t repr_id)
+{
+ struct sfc_repr_proxy_port *port;
+ struct sfc_repr_proxy *rp;
+ struct sfc_adapter *sa;
+ int rc;
+
+ sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ rp = sfc_repr_proxy_by_adapter(sa);
+
+ sfc_log_init(sa, "entry");
+
+ port = sfc_repr_proxy_find_port(rp, repr_id);
+ if (port == NULL) {
+ sfc_err(sa, "failed: no such port");
+ rc = ENOENT;
+ goto fail_no_port;
+ }
+
+ if (rp->started) {
+ rc = sfc_repr_proxy_mbox_send(&rp->mbox, port,
+ SFC_REPR_PROXY_MBOX_DEL_PORT);
+ if (rc != 0) {
+ sfc_err(sa, "failed to remove proxy port %u",
+ port->repr_id);
+ goto fail_port_remove;
+ }
+ } else {
+ TAILQ_REMOVE(&rp->ports, port, entries);
+ }
+
+ rte_free(port);
+
sfc_log_init(sa, "done");
+
+ sfc_put_adapter(sa);
+
+ return 0;
+
+fail_port_remove:
+fail_no_port:
+ sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+ sfc_put_adapter(sa);
+
+ return rc;
}