#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
+#include "sfc_mae_counter.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
+#include "sfc_sw_stats.h"
+/*
+ * Check if port representors may be used with this adapter.
+ *
+ * Representors require the PF to be in switchdev mode (sa->switchdev)
+ * and at least one service lcore to be available to run the representor
+ * proxy; sfc_get_service_lcore() returns RTE_MAX_LCORE when no service
+ * core can be obtained.
+ */
+bool
+sfc_repr_supported(const struct sfc_adapter *sa)
+{
+ if (!sa->switchdev)
+ return false;
+
+ /*
+ * Representor proxy should use service lcore on PF's socket
+ * (sa->socket_id) to be efficient. But the proxy will fall back
+ * to any socket if it is not possible to get the service core
+ * on the same socket. Check that at least service core on any
+ * socket is available.
+ */
+ if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
+ return false;
+
+ return true;
+}
+
int
sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp)
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
efx_drv_limits_t limits;
int rc;
uint32_t evq_allocated;
rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
- /* Subtract management EVQ not used for traffic */
- SFC_ASSERT(evq_allocated > 0);
+ /*
+ * Subtract management EVQ not used for traffic
+ * The resource allocation strategy is as follows:
+ * - one EVQ for management
+ * - one EVQ for each ethdev RXQ
+ * - one EVQ for each ethdev TXQ
+ * - one EVQ and one RXQ for optional MAE counters.
+ */
+ if (evq_allocated == 0) {
+ sfc_err(sa, "count of allocated EvQ is 0");
+ rc = ENOMEM;
+ goto fail_allocate_evq;
+ }
evq_allocated--;
- /* Right now we use separate EVQ for Rx and Tx */
- sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
- sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
+ /*
+ * Reserve absolutely required minimum.
+ * Right now we use separate EVQ for Rx and Tx.
+ */
+ if (rxq_allocated > 0 && evq_allocated > 0) {
+ sa->rxq_max = 1;
+ rxq_allocated--;
+ evq_allocated--;
+ }
+ if (txq_allocated > 0 && evq_allocated > 0) {
+ sa->txq_max = 1;
+ txq_allocated--;
+ evq_allocated--;
+ }
+
+ if (sfc_mae_counter_rxq_required(sa) &&
+ rxq_allocated > 0 && evq_allocated > 0) {
+ rxq_allocated--;
+ evq_allocated--;
+ sas->counters_rxq_allocated = true;
+ } else {
+ sas->counters_rxq_allocated = false;
+ }
+
+ /* Add remaining allocated queues */
+ sa->rxq_max += MIN(rxq_allocated, evq_allocated / 2);
+ sa->txq_max += MIN(txq_allocated, evq_allocated - sa->rxq_max);
/* Keep NIC initialized */
return 0;
+fail_allocate_evq:
fail_get_vi_pool:
efx_nic_fini(sa->nic);
fail_nic_init:
sfc_set_drv_limits(struct sfc_adapter *sa)
{
const struct rte_eth_dev_data *data = sa->eth_dev->data;
+ uint32_t rxq_reserved = sfc_nb_reserved_rxq(sfc_sa2shared(sa));
efx_drv_limits_t lim;
memset(&lim, 0, sizeof(lim));
- /* Limits are strict since take into account initial estimation */
+ /*
+ * Limits are strict since take into account initial estimation.
+ * Resource allocation strategy is described in
+ * sfc_estimate_resource_limits().
+ */
lim.edl_min_evq_count = lim.edl_max_evq_count =
- 1 + data->nb_rx_queues + data->nb_tx_queues;
- lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
+ 1 + data->nb_rx_queues + data->nb_tx_queues + rxq_reserved;
+ lim.edl_min_rxq_count = lim.edl_max_rxq_count =
+ data->nb_rx_queues + rxq_reserved;
lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
return efx_nic_set_drv_limits(sa->nic, &lim);
if (rc != 0)
goto fail_flows_insert;
+ rc = sfc_repr_proxy_start(sa);
+ if (rc != 0)
+ goto fail_repr_proxy_start;
+
sfc_log_init(sa, "done");
return 0;
+fail_repr_proxy_start:
+ sfc_flow_stop(sa);
+
fail_flows_insert:
sfc_tx_stop(sa);
sa->state = SFC_ADAPTER_STOPPING;
+ sfc_repr_proxy_stop(sa);
sfc_flow_stop(sa);
sfc_tx_stop(sa);
sfc_rx_stop(sa);
if (rc != 0)
goto fail_tx_configure;
+ rc = sfc_sw_xstats_configure(sa);
+ if (rc != 0)
+ goto fail_sw_xstats_configure;
+
sa->state = SFC_ADAPTER_CONFIGURED;
sfc_log_init(sa, "done");
return 0;
+fail_sw_xstats_configure:
+ sfc_tx_close(sa);
+
fail_tx_configure:
sfc_rx_close(sa);
SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
sa->state = SFC_ADAPTER_CLOSING;
+ sfc_sw_xstats_close(sa);
sfc_tx_close(sa);
sfc_rx_close(sa);
sfc_port_close(sa);
if (rc != 0)
goto fail_filter_attach;
+ rc = sfc_mae_counter_rxq_attach(sa);
+ if (rc != 0)
+ goto fail_mae_counter_rxq_attach;
+
rc = sfc_mae_attach(sa);
if (rc != 0)
goto fail_mae_attach;
+ rc = sfc_mae_switchdev_init(sa);
+ if (rc != 0)
+ goto fail_mae_switchdev_init;
+
+ rc = sfc_repr_proxy_attach(sa);
+ if (rc != 0)
+ goto fail_repr_proxy_attach;
+
sfc_log_init(sa, "fini nic");
efx_nic_fini(enp);
sfc_flow_init(sa);
+ rc = sfc_sw_xstats_init(sa);
+ if (rc != 0)
+ goto fail_sw_xstats_init;
+
/*
* Create vSwitch to be able to use VFs when PF is not started yet
* as DPDK port. VFs should be able to talk to each other even
return 0;
fail_sriov_vswitch_create:
+ sfc_sw_xstats_close(sa);
+
+fail_sw_xstats_init:
sfc_flow_fini(sa);
+ sfc_repr_proxy_detach(sa);
+
+fail_repr_proxy_attach:
+ sfc_mae_switchdev_fini(sa);
+
+fail_mae_switchdev_init:
sfc_mae_detach(sa);
fail_mae_attach:
+ sfc_mae_counter_rxq_detach(sa);
+
+fail_mae_counter_rxq_attach:
sfc_filter_detach(sa);
fail_filter_attach:
sfc_flow_fini(sa);
+ sfc_repr_proxy_detach(sa);
+ sfc_mae_switchdev_fini(sa);
sfc_mae_detach(sa);
+ sfc_mae_counter_rxq_detach(sa);
sfc_filter_detach(sa);
sfc_rss_detach(sa);
sfc_port_detach(sa);