#include "sfc_flow.h"
#include "sfc_dp.h"
#include "sfc_dp_rx.h"
+#include "sfc_sw_stats.h"
+
+/*
+ * Sentinels marking xstats slots not filled in by any stats provider when
+ * values/names are gathered by ID: the output arrays may be filled in
+ * nonsequential order, so unfilled entries must be distinguishable.
+ */
+#define SFC_XSTAT_ID_INVALID_VAL UINT64_MAX
+#define SFC_XSTAT_ID_INVALID_NAME '\0'
uint32_t sfc_logtype_driver;
}
static void
-sfc_rx_queue_release(void *queue)
+sfc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_dp_rxq *dp_rxq = dev->data->rx_queues[qid];
struct sfc_rxq *rxq;
struct sfc_adapter *sa;
sfc_sw_index_t sw_index;
}
static void
-sfc_tx_queue_release(void *queue)
+sfc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_dp_txq *dp_txq = dev->data->tx_queues[qid];
struct sfc_txq *txq;
sfc_sw_index_t sw_index;
struct sfc_adapter *sa;
sfc_adapter_unlock(sa);
}
+/*
+ * Sum datapath Rx packet/byte counters over all initialized ethdev Rx
+ * queues.  The per-queue values captured at the last statistics reset
+ * are subtracted so that reported totals restart from zero after a reset.
+ */
+static void
+sfc_stats_get_dp_rx(struct sfc_adapter *sa, uint64_t *pkts, uint64_t *bytes)
+{
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	uint64_t pkts_sum = 0;
+	uint64_t bytes_sum = 0;
+	unsigned int i;
+
+	for (i = 0; i < sas->ethdev_rxq_count; ++i) {
+		struct sfc_rxq_info *rxq_info;
+
+		rxq_info = sfc_rxq_info_by_ethdev_qid(sas, i);
+		/* Only initialized queues carry valid datapath stats */
+		if (rxq_info->state & SFC_RXQ_INITIALIZED) {
+			union sfc_pkts_bytes qstats;
+
+			/* Snapshot the queue's packet/byte counter pair */
+			sfc_pkts_bytes_get(&rxq_info->dp->dpq.stats, &qstats);
+			pkts_sum += qstats.pkts -
+				sa->sw_stats.reset_rx_pkts[i];
+			bytes_sum += qstats.bytes -
+				sa->sw_stats.reset_rx_bytes[i];
+		}
+	}
+
+	*pkts = pkts_sum;
+	*bytes = bytes_sum;
+}
+
+/*
+ * Sum datapath Tx packet/byte counters over all initialized ethdev Tx
+ * queues.  The per-queue values captured at the last statistics reset
+ * are subtracted so that reported totals restart from zero after a reset.
+ */
+static void
+sfc_stats_get_dp_tx(struct sfc_adapter *sa, uint64_t *pkts, uint64_t *bytes)
+{
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	uint64_t pkts_sum = 0;
+	uint64_t bytes_sum = 0;
+	unsigned int i;
+
+	for (i = 0; i < sas->ethdev_txq_count; ++i) {
+		struct sfc_txq_info *txq_info;
+
+		txq_info = sfc_txq_info_by_ethdev_qid(sas, i);
+		/* Only initialized queues carry valid datapath stats */
+		if (txq_info->state & SFC_TXQ_INITIALIZED) {
+			union sfc_pkts_bytes qstats;
+
+			/* Snapshot the queue's packet/byte counter pair */
+			sfc_pkts_bytes_get(&txq_info->dp->dpq.stats, &qstats);
+			pkts_sum += qstats.pkts -
+				sa->sw_stats.reset_tx_pkts[i];
+			bytes_sum += qstats.bytes -
+				sa->sw_stats.reset_tx_bytes[i];
+		}
+	}
+
+	*pkts = pkts_sum;
+	*bytes = bytes_sum;
+}
+
/*
* Some statistics are computed as A - B where A and B each increase
* monotonically with some hardware counter(s) and the counters are read
static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
+	/*
+	 * Per-queue SW (datapath) packet/byte counters are preferred over
+	 * MAC counters whenever the datapath implementation provides them.
+	 */
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+	bool have_dp_rx_stats = sap->dp_rx->features & SFC_DP_RX_FEAT_STATS;
+	bool have_dp_tx_stats = sap->dp_tx->features & SFC_DP_TX_FEAT_STATS;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	sfc_adapter_lock(sa);
-	ret = sfc_port_update_mac_stats(sa);
+	if (have_dp_rx_stats)
+		sfc_stats_get_dp_rx(sa, &stats->ipackets, &stats->ibytes);
+	if (have_dp_tx_stats)
+		sfc_stats_get_dp_tx(sa, &stats->opackets, &stats->obytes);
+
+	/*
+	 * NOTE(review): B_FALSE presumably selects a non-forced/cached MAC
+	 * stats refresh - confirm against sfc_port_update_mac_stats().
+	 */
+	ret = sfc_port_update_mac_stats(sa, B_FALSE);
	if (ret != 0)
		goto unlock;
	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
-		stats->ipackets =
-			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
-			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
-			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
-		stats->opackets =
-			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
-			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
-			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
-		stats->ibytes =
-			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
-			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
-			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
-		stats->obytes =
-			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
-			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
-			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+		/* MAC vadapter counters are used only when DP stats are absent */
+		if (!have_dp_rx_stats) {
+			stats->ipackets =
+				mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
+				mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
+				mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
+			stats->ibytes =
+				mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
+				mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
+				mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
+
+			/* CRC is included in these stats, but shouldn't be */
+			stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+		}
+		if (!have_dp_tx_stats) {
+			stats->opackets =
+				mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
+				mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
+				mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
+			stats->obytes =
+				mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
+				mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
+				mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+
+			/* CRC is included in these stats, but shouldn't be */
+			stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
+		}
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
-
-		/* CRC is included in these stats, but shouldn't be */
-		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
-		stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
	} else {
-		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
-		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
-		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
-
-		/* CRC is included in these stats, but shouldn't be */
-		stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
-		stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
+		if (!have_dp_tx_stats) {
+			stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
+			/* CRC is included in MAC octets, but shouldn't be */
+			stats->obytes = mac_stats[EFX_MAC_TX_OCTETS] -
+				mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
+		}
	/*
	 * Take into account stats which are whenever supported
		mac_stats[EFX_MAC_RX_JABBER_PKTS];
	/* no oerrors counters supported on EF10 */
-		/* Exclude missed, errors and pauses from Rx packets */
-		sfc_update_diff_stat(&port->ipackets,
-			mac_stats[EFX_MAC_RX_PKTS] -
-			mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
-			stats->imissed - stats->ierrors);
-		stats->ipackets = port->ipackets;
+		if (!have_dp_rx_stats) {
+			/* Exclude missed, errors and pauses from Rx packets */
+			sfc_update_diff_stat(&port->ipackets,
+				mac_stats[EFX_MAC_RX_PKTS] -
+				mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
+				stats->imissed - stats->ierrors);
+			stats->ipackets = port->ipackets;
+			/* CRC is included in MAC octets, but shouldn't be */
+			stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS] -
+				mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
+		}
	}
unlock:
	if (rc != 0)
		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
+	/*
+	 * Reset SW (datapath) xstats as well - presumably captures per-queue
+	 * baselines (see sa->sw_stats.reset_* usage); confirm in sfc_sw_stats.
+	 */
+	sfc_sw_xstats_reset(sa);
+
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc >= 0);
	return -rc;
}
+/*
+ * Total number of xstats the device reports: MAC stats supported by the
+ * port plus SW (datapath) xstats.  Both counts are read under the adapter
+ * lock to get a consistent pair.
+ */
+static unsigned int
+sfc_xstats_get_nb_supported(struct sfc_adapter *sa)
+{
+	struct sfc_port *port = &sa->port;
+	unsigned int nb_supported;
+
+	sfc_adapter_lock(sa);
+	nb_supported = port->mac_stats_nb_supported +
+		       sfc_sw_xstats_get_nb_supported(sa);
+	sfc_adapter_unlock(sa);
+
+	return nb_supported;
+}
+
static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
-	struct sfc_port *port = &sa->port;
-	uint64_t *mac_stats;
+	unsigned int nb_written = 0;
+	unsigned int nb_supported = 0;
	int rc;
-	unsigned int i;
-	int nstats = 0;
-	sfc_adapter_lock(sa);
+	/* A NULL xstats array is a request for the supported-stats count only */
+	if (unlikely(xstats == NULL))
+		return sfc_xstats_get_nb_supported(sa);
-	rc = sfc_port_update_mac_stats(sa);
-	if (rc != 0) {
-		SFC_ASSERT(rc > 0);
-		nstats = -rc;
-		goto unlock;
-	}
+	/* MAC stats occupy the leading IDs; rc >= 0 is their supported count */
+	rc = sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written);
+	if (rc < 0)
+		return rc;
-	mac_stats = port->mac_stats_buf;
+	nb_supported = rc;
+	/* SW (datapath) xstats are appended after the MAC stats */
+	sfc_sw_xstats_get_vals(sa, xstats, xstats_count, &nb_written,
+			       &nb_supported);
-	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
-		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
-			if (xstats != NULL && nstats < (int)xstats_count) {
-				xstats[nstats].id = nstats;
-				xstats[nstats].value = mac_stats[i];
-			}
-			nstats++;
-		}
-	}
-
-unlock:
-	sfc_adapter_unlock(sa);
-
-	return nstats;
+	return nb_supported;
}
static int
struct sfc_port *port = &sa->port;
unsigned int i;
unsigned int nstats = 0;
+ unsigned int nb_written = 0;
+ int ret;
+
+ if (unlikely(xstats_names == NULL))
+ return sfc_xstats_get_nb_supported(sa);
for (i = 0; i < EFX_MAC_NSTATS; ++i) {
if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
- if (xstats_names != NULL && nstats < xstats_count)
+ if (nstats < xstats_count) {
strlcpy(xstats_names[nstats].name,
efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
+ nb_written++;
+ }
nstats++;
}
}
+ ret = sfc_sw_xstats_get_names(sa, xstats_names, xstats_count,
+ &nb_written, &nstats);
+ if (ret != 0) {
+ SFC_ASSERT(ret < 0);
+ return ret;
+ }
+
return nstats;
}
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
-	uint64_t *mac_stats;
-	unsigned int nb_supported = 0;
-	unsigned int nb_written = 0;
+	unsigned int nb_supported;
	unsigned int i;
-	int ret;
	int rc;
	if (unlikely(ids == NULL || values == NULL))
		return -EINVAL;
-	sfc_adapter_lock(sa);
-
-	rc = sfc_port_update_mac_stats(sa);
-	if (rc != 0) {
-		SFC_ASSERT(rc > 0);
-		ret = -rc;
-		goto unlock;
-	}
-
-	mac_stats = port->mac_stats_buf;
+	/*
+	 * Values array could be filled in nonsequential order. Fill values with
+	 * constant indicating invalid ID first.
+	 */
+	for (i = 0; i < n; i++)
+		values[i] = SFC_XSTAT_ID_INVALID_VAL;
-	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
-		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
-			continue;
+	/* Port layer fills values for IDs in the MAC stats range */
+	rc = sfc_port_get_mac_stats_by_id(sa, ids, values, n);
+	if (rc != 0)
+		return rc;
-		if (ids[nb_written] == nb_supported)
-			values[nb_written++] = mac_stats[i];
+	/* SW xstats own the IDs above the MAC stats range */
+	nb_supported = port->mac_stats_nb_supported;
+	sfc_sw_xstats_get_vals_by_id(sa, ids, values, n, &nb_supported);
-		++nb_supported;
+	/* Return number of written stats before invalid ID is encountered. */
+	for (i = 0; i < n; i++) {
+		if (values[i] == SFC_XSTAT_ID_INVALID_VAL)
+			return i;
	}
-	ret = nb_written;
-
-unlock:
-	sfc_adapter_unlock(sa);
-
-	return ret;
+	return n;
}
static int
sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
+			   const uint64_t *ids,
			   struct rte_eth_xstat_name *xstats_names,
-			   const uint64_t *ids, unsigned int size)
+			   unsigned int size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
-	unsigned int nb_supported = 0;
-	unsigned int nb_written = 0;
+	unsigned int nb_supported;
	unsigned int i;
+	int ret;
	if (unlikely(xstats_names == NULL && ids != NULL) ||
	    unlikely(xstats_names != NULL && ids == NULL))
		return -EINVAL;
-	sfc_adapter_lock(sa);
+	/* Both arrays NULL is a request for the supported-stats count only */
+	if (unlikely(xstats_names == NULL && ids == NULL))
+		return sfc_xstats_get_nb_supported(sa);
-	if (unlikely(xstats_names == NULL && ids == NULL)) {
-		nb_supported = port->mac_stats_nb_supported;
-		sfc_adapter_unlock(sa);
-		return nb_supported;
-	}
+	/*
+	 * Names array could be filled in nonsequential order. Fill names with
+	 * string indicating invalid ID first.
+	 */
+	for (i = 0; i < size; i++)
+		xstats_names[i].name[0] = SFC_XSTAT_ID_INVALID_NAME;
-	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
-		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
-			continue;
+	sfc_adapter_lock(sa);
-		if (ids[nb_written] == nb_supported) {
-			char *name = xstats_names[nb_written++].name;
+	/* The ID-to-stat translation table must cover every supported stat */
+	SFC_ASSERT(port->mac_stats_nb_supported <=
+		   RTE_DIM(port->mac_stats_by_id));
-			strlcpy(name, efx_mac_stat_name(sa->nic, i),
+	/* MAC stats own the ID range [0, mac_stats_nb_supported) */
+	for (i = 0; i < size; i++) {
+		if (ids[i] < port->mac_stats_nb_supported) {
+			strlcpy(xstats_names[i].name,
+				efx_mac_stat_name(sa->nic,
+						  port->mac_stats_by_id[ids[i]]),
				sizeof(xstats_names[0].name));
		}
-
-		++nb_supported;
	}
+	nb_supported = port->mac_stats_nb_supported;
+
	sfc_adapter_unlock(sa);
-	return nb_written;
+	/* SW xstats fill names for IDs above the MAC stats range */
+	ret = sfc_sw_xstats_get_names_by_id(sa, ids, xstats_names, size,
+					    &nb_supported);
+	if (ret != 0) {
+		SFC_ASSERT(ret < 0);
+		return ret;
+	}
+
+	/* Return number of written names before invalid ID is encountered. */
+	for (i = 0; i < size; i++) {
+		if (xstats_names[i].name[0] == SFC_XSTAT_ID_INVALID_NAME)
+			return i;
+	}
+
+	return size;
}
static int
return sap->dp_rx->qdesc_npending(rxq_info->dp);
}
-/*
- * The function is used by the secondary process as well. It must not
- * use any process-local pointers from the adapter data.
- */
-static int
-sfc_rx_descriptor_done(void *queue, uint16_t offset)
-{
- struct sfc_dp_rxq *dp_rxq = queue;
- const struct sfc_dp_rx *dp_rx;
-
- dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
-
- return offset < dp_rx->qdesc_npending(dp_rxq);
-}
-
/*
* The function is used by the secondary process as well. It must not
* use any process-local pointers from the adapter data.
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->rx_queue_count = sfc_rx_queue_count;
- dev->rx_descriptor_done = sfc_rx_descriptor_done;
dev->rx_descriptor_status = sfc_rx_descriptor_status;
dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_ops;
dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->rx_queue_count = sfc_rx_queue_count;
- dev->rx_descriptor_done = sfc_rx_descriptor_done;
dev->rx_descriptor_status = sfc_rx_descriptor_status;
dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_secondary_ops;
}
}
+/*
+ * Parse the "switch_mode" device argument and set sa->switchdev.
+ * Without an explicit value, switchdev mode is selected by default when
+ * the NIC supports MAE and the EVB datapath capability is absent;
+ * otherwise legacy mode is used.
+ * Returns 0 on success or a positive errno-style code on failure.
+ */
+static int
+sfc_parse_switch_mode(struct sfc_adapter *sa)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	const char *switch_mode = NULL;
+	int rc;
+
+	sfc_log_init(sa, "entry");
+
+	rc = sfc_kvargs_process(sa, SFC_KVARG_SWITCH_MODE,
+				sfc_kvarg_string_handler, &switch_mode);
+	if (rc != 0)
+		goto fail_kvargs;
+
+	if (switch_mode == NULL) {
+		/* No devarg given: pick the default from NIC capabilities */
+		sa->switchdev = encp->enc_mae_supported &&
+				!encp->enc_datapath_cap_evb;
+	} else if (strcasecmp(switch_mode, SFC_KVARG_SWITCH_MODE_LEGACY) == 0) {
+		sa->switchdev = false;
+	} else if (strcasecmp(switch_mode,
+			      SFC_KVARG_SWITCH_MODE_SWITCHDEV) == 0) {
+		sa->switchdev = true;
+	} else {
+		sfc_err(sa, "invalid switch mode device argument '%s'",
+			switch_mode);
+		rc = EINVAL;
+		goto fail_mode;
+	}
+
+	sfc_log_init(sa, "done");
+
+	return 0;
+
+fail_mode:
+fail_kvargs:
+	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+
+	return rc;
+}
+
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
if (rc != 0)
goto fail_probe;
+ /*
+ * Selecting a default switch mode requires the NIC to be probed and
+ * to have its capabilities filled in.
+ */
+ rc = sfc_parse_switch_mode(sa);
+ if (rc != 0)
+ goto fail_switch_mode;
+
sfc_log_init(sa, "set device ops");
rc = sfc_eth_dev_set_ops(dev);
if (rc != 0)
if (rc != 0)
goto fail_attach;
+ if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_SUPPORTED) {
+ sfc_err(sa,
+ "failed to enable switchdev mode without MAE support");
+ rc = ENOTSUP;
+ goto fail_switchdev_no_mae;
+ }
+
encp = efx_nic_cfg_get(sa->nic);
/*
sfc_log_init(sa, "done");
return 0;
+fail_switchdev_no_mae:
+ sfc_detach(sa);
+
fail_attach:
sfc_eth_dev_clear_ops(dev);
fail_set_ops:
+fail_switch_mode:
sfc_unprobe(sa);
fail_probe:
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
+ SFC_KVARG_SWITCH_MODE "=" SFC_KVARG_VALUES_SWITCH_MODE " "
SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "