uint16_t switch_port_id;
};
+/* Per-queue statistics maintained by representor datapath burst callbacks */
+struct sfc_repr_queue_stats {
+ /* Packet and byte counters (see sfc_pkts_bytes_add()/_get() helpers) */
+ union sfc_pkts_bytes packets_bytes;
+};
+
struct sfc_repr_rxq {
/* Datapath members */
struct rte_ring *ring;
+ /* Rx counters updated by sfc_repr_rx_burst() */
+ struct sfc_repr_queue_stats stats;
};
struct sfc_repr_txq {
/* Datapath members */
struct rte_ring *ring;
efx_mport_id_t egress_mport;
+ /* Tx counters updated by sfc_repr_tx_burst() */
+ struct sfc_repr_queue_stats stats;
};
/** Primary process representor private data */
rte_ring_reset(txq->ring);
}
+/**
+ * Rx burst callback for representor ethdevs.
+ *
+ * Dequeue mbufs that the representors proxy has forwarded to this
+ * queue's ring and account per-queue packet/byte statistics.
+ *
+ * @rx_queue: struct sfc_repr_rxq pointer (queue private data)
+ * @rx_pkts: array to be filled with received mbufs
+ * @nb_pkts: maximum number of packets to receive
+ *
+ * Return: number of mbufs actually received
+ */
+static uint16_t
+sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_repr_rxq *rxq = rx_queue;
+ void **objs = (void *)&rx_pkts[0];
+ unsigned int n_rx;
+
+ /* mbufs port is already filled correctly by representors proxy */
+ n_rx = rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);
+
+ if (n_rx > 0) {
+ unsigned int n_bytes = 0;
+ unsigned int i = 0;
+
+ /* n_rx > 0 is guaranteed here, so do-while avoids a first check */
+ do {
+ n_bytes += rx_pkts[i]->pkt_len;
+ } while (++i < n_rx);
+
+ sfc_pkts_bytes_add(&rxq->stats.packets_bytes, n_rx, n_bytes);
+ }
+
+ return n_rx;
+}
+
+/**
+ * Tx burst callback for representor ethdevs.
+ *
+ * Mark each mbuf with the egress m-port override, enqueue the burst
+ * onto the ring served by the representors proxy, and account
+ * packet/byte statistics for the packets actually enqueued.
+ *
+ * @tx_queue: struct sfc_repr_txq pointer (queue private data)
+ * @tx_pkts: array of mbufs to transmit
+ * @nb_pkts: number of packets in the array
+ *
+ * Return: number of mbufs actually enqueued for transmission
+ */
+static uint16_t
+sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_repr_txq *txq = tx_queue;
+ unsigned int n_bytes = 0;
+ unsigned int n_tx;
+ void **objs;
+ uint16_t i;
+
+ /*
+ * mbuf is likely cache-hot. Set flag and egress m-port here instead of
+ * doing that in representors proxy. Also, it should help to avoid
+ * cache bounce. Moreover, potentially, it allows to use one
+ * multi-producer single-consumer ring for all representors.
+ *
+ * The only potential problem is doing this work repeatedly if the
+ * enqueue fails and the sender retries.
+ */
+ for (i = 0; i < nb_pkts; ++i) {
+ struct rte_mbuf *m = tx_pkts[i];
+
+ m->ol_flags |= sfc_dp_mport_override;
+ *RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset,
+ efx_mport_id_t *) = txq->egress_mport;
+ n_bytes += tx_pkts[i]->pkt_len;
+ }
+
+ objs = (void *)&tx_pkts[0];
+ n_tx = rte_ring_sp_enqueue_burst(txq->ring, objs, nb_pkts, NULL);
+
+ /*
+ * Remove m-port override flag from packets that were not enqueued.
+ * Setting the flag only for enqueued packets after the burst is
+ * not possible since the ownership of enqueued packets is
+ * transferred to representor proxy. The same logic applies to
+ * counting the enqueued packets' bytes.
+ */
+ for (i = n_tx; i < nb_pkts; ++i) {
+ struct rte_mbuf *m = tx_pkts[i];
+
+ m->ol_flags &= ~sfc_dp_mport_override;
+ n_bytes -= m->pkt_len;
+ }
+
+ sfc_pkts_bytes_add(&txq->stats.packets_bytes, n_tx, n_bytes);
+
+ return n_tx;
+}
+
static int
sfc_repr_start(struct rte_eth_dev *dev)
{
}
switch (conf->rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
if (nb_rx_queues != 1) {
sfcr_err(sr, "Rx RSS is not supported with %u queues",
nb_rx_queues);
ret = -EINVAL;
}
break;
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
break;
default:
sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
break;
}
- if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
+ if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
sfcr_err(sr, "Tx mode MQ modes not supported");
ret = -EINVAL;
}
return 0;
}
+/**
+ * link_update ethdev callback.
+ *
+ * Once the representor is started, report the link as UP with unknown
+ * speed; before that, report unknown link mode. Representors have no
+ * physical link of their own, hence the synthetic status.
+ */
+static int
+sfc_repr_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
+ struct rte_eth_link link;
+
+ if (sr->state != SFC_ETHDEV_STARTED) {
+ sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
+ } else {
+ memset(&link, 0, sizeof(link));
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
static int
sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
const char *type_name, uint16_t qid, uint16_t nb_desc,
(void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
dev->dev_ops = NULL;
sfc_repr_unlock(sr);
return 0;
}
+/**
+ * stats_get ethdev callback: sum per-queue Rx/Tx packet and byte
+ * counters maintained by the burst callbacks into *stats.
+ *
+ * NOTE(review): accumulates with += and so relies on the ethdev layer
+ * zeroing *stats before this callback; also assumes all queue pointers
+ * in dev->data are set up — confirm both against the ethdev contract.
+ */
+static int
+sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ union sfc_pkts_bytes queue_stats;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct sfc_repr_rxq *rxq = dev->data->rx_queues[i];
+
+ /* Snapshot the queue counters consistently via the helper */
+ sfc_pkts_bytes_get(&rxq->stats.packets_bytes,
+ &queue_stats);
+
+ stats->ipackets += queue_stats.pkts;
+ stats->ibytes += queue_stats.bytes;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct sfc_repr_txq *txq = dev->data->tx_queues[i];
+
+ sfc_pkts_bytes_get(&txq->stats.packets_bytes,
+ &queue_stats);
+
+ stats->opackets += queue_stats.pkts;
+ stats->obytes += queue_stats.bytes;
+ }
+
+ return 0;
+}
+
static const struct eth_dev_ops sfc_repr_dev_ops = {
.dev_configure = sfc_repr_dev_configure,
.dev_start = sfc_repr_dev_start,
.dev_stop = sfc_repr_dev_stop,
.dev_close = sfc_repr_dev_close,
.dev_infos_get = sfc_repr_dev_infos_get,
+ .link_update = sfc_repr_dev_link_update,
+ .stats_get = sfc_repr_stats_get,
.rx_queue_setup = sfc_repr_rx_queue_setup,
.rx_queue_release = sfc_repr_rx_queue_release,
.tx_queue_setup = sfc_repr_tx_queue_setup,
struct sfc_repr_init_data {
uint16_t pf_port_id;
- uint16_t repr_id;
uint16_t switch_domain_id;
efx_mport_sel_t mport_sel;
+ /* PCIe interface of the represented entity */
+ efx_pcie_interface_t intf;
+ /* PF number of the represented entity */
+ uint16_t pf;
+ /* VF number of the represented entity (used for VF representors) */
+ uint16_t vf;
};
static int
switch_port_request.ethdev_mportp = ðdev_mport_sel;
switch_port_request.entity_mportp = &repr_data->mport_sel;
switch_port_request.ethdev_port_id = dev->data->port_id;
+ switch_port_request.port_data.repr.intf = repr_data->intf;
+ switch_port_request.port_data.repr.pf = repr_data->pf;
+ switch_port_request.port_data.repr.vf = repr_data->vf;
ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
&switch_port_request,
}
ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
- repr_data->repr_id,
+ srs->switch_port_id,
dev->data->port_id,
&repr_data->mport_sel);
if (ret != 0) {
dev->process_private = sr;
srs->pf_port_id = repr_data->pf_port_id;
- srs->repr_id = repr_data->repr_id;
+ srs->repr_id = srs->switch_port_id;
srs->switch_domain_id = repr_data->switch_domain_id;
dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
goto fail_mac_addrs;
}
+ dev->rx_pkt_burst = sfc_repr_rx_burst;
+ dev->tx_pkt_burst = sfc_repr_tx_burst;
dev->dev_ops = &sfc_repr_dev_ops;
sr->state = SFC_ETHDEV_INITIALIZED;
fail_alloc_sr:
(void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
- repr_data->repr_id);
+ srs->switch_port_id);
fail_create_port:
fail_mae_assign_switch_port:
}
+/**
+ * Create an ethdev representor for the given entity (PF or VF).
+ *
+ * The device name encodes the DPDK controller, PF and, for VF
+ * representors, the VF number. Creation is idempotent: if an ethdev
+ * with the derived name already exists, the function succeeds without
+ * creating a new one.
+ *
+ * Return: 0 on success, negative errno-style value on failure
+ */
int
-sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
- uint16_t switch_domain_id, const efx_mport_sel_t *mport_sel)
+sfc_repr_create(struct rte_eth_dev *parent,
+ struct sfc_repr_entity_info *entity,
+ uint16_t switch_domain_id,
+ const efx_mport_sel_t *mport_sel)
{
struct sfc_repr_init_data repr_data;
char name[RTE_ETH_NAME_MAX_LEN];
+ int controller;
int ret;
+ int rc;
+ struct rte_eth_dev *dev;
+
+ controller = -1;
+ /* Map the entity's PCIe interface to its DPDK controller index */
+ rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
+ entity->intf, &controller);
+ if (rc != 0) {
+ SFC_GENERIC_LOG(ERR, "%s() failed to get DPDK controller for %d",
+ __func__, entity->intf);
+ /* NOTE(review): assumes rc is a positive errno value — confirm */
+ return -rc;
+ }
+
+ /* Build a unique ethdev name from the represented entity identity */
+ switch (entity->type) {
+ case RTE_ETH_REPRESENTOR_VF:
+ ret = snprintf(name, sizeof(name), "net_%s_representor_c%upf%uvf%u",
+ parent->device->name, controller, entity->pf,
+ entity->vf);
+ break;
+ case RTE_ETH_REPRESENTOR_PF:
+ ret = snprintf(name, sizeof(name), "net_%s_representor_c%upf%u",
+ parent->device->name, controller, entity->pf);
+ break;
+ default:
+ return -ENOTSUP;
+ }
- if (snprintf(name, sizeof(name), "net_%s_representor_%u",
- parent->device->name, representor_id) >=
- (int)sizeof(name)) {
+ if (ret >= (int)sizeof(name)) {
SFC_GENERIC_LOG(ERR, "%s() failed name too long", __func__);
return -ENAMETOOLONG;
}
- memset(&repr_data, 0, sizeof(repr_data));
- repr_data.pf_port_id = parent->data->port_id;
- repr_data.repr_id = representor_id;
- repr_data.switch_domain_id = switch_domain_id;
- repr_data.mport_sel = *mport_sel;
-
- ret = rte_eth_dev_create(parent->device, name,
- sizeof(struct sfc_repr_shared),
- NULL, NULL,
- sfc_repr_eth_dev_init, &repr_data);
- if (ret != 0)
- SFC_GENERIC_LOG(ERR, "%s() failed to create device", __func__);
-
- SFC_GENERIC_LOG(INFO, "%s() done: %s", __func__, rte_strerror(-ret));
+ /* Skip creation if a representor with this name already exists */
+ dev = rte_eth_dev_allocated(name);
+ if (dev == NULL) {
+ memset(&repr_data, 0, sizeof(repr_data));
+ repr_data.pf_port_id = parent->data->port_id;
+ repr_data.switch_domain_id = switch_domain_id;
+ repr_data.mport_sel = *mport_sel;
+ repr_data.intf = entity->intf;
+ repr_data.pf = entity->pf;
+ repr_data.vf = entity->vf;
+
+ ret = rte_eth_dev_create(parent->device, name,
+ sizeof(struct sfc_repr_shared),
+ NULL, NULL,
+ sfc_repr_eth_dev_init, &repr_data);
+ if (ret != 0) {
+ SFC_GENERIC_LOG(ERR, "%s() failed to create device",
+ __func__);
+ return ret;
+ }
+ }
- return ret;
+ return 0;
}