sfc_adapter_unlock(sa);
}
+/*
+ * Sum per-queue datapath Rx statistics (packets and bytes) over all
+ * initialized ethdev Rx queues, subtracting the per-queue values latched
+ * at the last statistics reset so the totals count from that reset.
+ */
+static void
+sfc_stats_get_dp_rx(struct sfc_adapter *sa, uint64_t *pkts, uint64_t *bytes)
+{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ uint64_t pkts_sum = 0;
+ uint64_t bytes_sum = 0;
+ unsigned int i;
+
+ for (i = 0; i < sas->ethdev_rxq_count; ++i) {
+ struct sfc_rxq_info *rxq_info;
+
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, i);
+ /* Queues that were never set up contribute nothing */
+ if (rxq_info->state & SFC_RXQ_INITIALIZED) {
+ union sfc_pkts_bytes qstats;
+
+ /* Read the queue's current packet/byte counters */
+ sfc_pkts_bytes_get(&rxq_info->dp->dpq.stats, &qstats);
+ pkts_sum += qstats.pkts -
+ sa->sw_stats.reset_rx_pkts[i];
+ bytes_sum += qstats.bytes -
+ sa->sw_stats.reset_rx_bytes[i];
+ }
+ }
+
+ *pkts = pkts_sum;
+ *bytes = bytes_sum;
+}
+
+/*
+ * Sum per-queue datapath Tx statistics (packets and bytes) over all
+ * initialized ethdev Tx queues, subtracting the per-queue values latched
+ * at the last statistics reset so the totals count from that reset.
+ */
+static void
+sfc_stats_get_dp_tx(struct sfc_adapter *sa, uint64_t *pkts, uint64_t *bytes)
+{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ uint64_t pkts_sum = 0;
+ uint64_t bytes_sum = 0;
+ unsigned int i;
+
+ for (i = 0; i < sas->ethdev_txq_count; ++i) {
+ struct sfc_txq_info *txq_info;
+
+ txq_info = sfc_txq_info_by_ethdev_qid(sas, i);
+ /* Queues that were never set up contribute nothing */
+ if (txq_info->state & SFC_TXQ_INITIALIZED) {
+ union sfc_pkts_bytes qstats;
+
+ /* Read the queue's current packet/byte counters */
+ sfc_pkts_bytes_get(&txq_info->dp->dpq.stats, &qstats);
+ pkts_sum += qstats.pkts -
+ sa->sw_stats.reset_tx_pkts[i];
+ bytes_sum += qstats.bytes -
+ sa->sw_stats.reset_tx_bytes[i];
+ }
+ }
+
+ *pkts = pkts_sum;
+ *bytes = bytes_sum;
+}
+
/*
* Some statistics are computed as A - B where A and B each increase
* monotonically with some hardware counter(s) and the counters are read
static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+ bool have_dp_rx_stats = sap->dp_rx->features & SFC_DP_RX_FEAT_STATS;
+ bool have_dp_tx_stats = sap->dp_tx->features & SFC_DP_TX_FEAT_STATS;
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
uint64_t *mac_stats;
sfc_adapter_lock(sa);
+ /*
+ * Per-queue datapath stats, when the datapath supports them, take
+ * precedence: the MAC-based fallbacks below are skipped in that case.
+ */
+ if (have_dp_rx_stats)
+ sfc_stats_get_dp_rx(sa, &stats->ipackets, &stats->ibytes);
+ if (have_dp_tx_stats)
+ sfc_stats_get_dp_tx(sa, &stats->opackets, &stats->obytes);
+
ret = sfc_port_update_mac_stats(sa, B_FALSE);
if (ret != 0)
goto unlock;
if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
-		stats->ipackets =
-			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
-			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
-			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
-		stats->opackets =
-			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
-			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
-			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
-		stats->ibytes =
-			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
-			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
-			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
-		stats->obytes =
-			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
-			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
-			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+ /* Fall back to vadapter MAC counters for Rx only if needed */
+ if (!have_dp_rx_stats) {
+ stats->ipackets =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
+ stats->ibytes =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
+
+ /* CRC is included in these stats, but shouldn't be */
+ stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+ }
+ /* Fall back to vadapter MAC counters for Tx only if needed */
+ if (!have_dp_tx_stats) {
+ stats->opackets =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
+ stats->obytes =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+
+ /* CRC is included in these stats, but shouldn't be */
+ stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
+ }
stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
-
-		/* CRC is included in these stats, but shouldn't be */
-		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
-		stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
} else {
-		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
-		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
-		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
-
-		/* CRC is included in these stats, but shouldn't be */
-		stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
-		stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
+ /* Port-level MAC Tx counters, CRC excluded, when no dp stats */
+ if (!have_dp_tx_stats) {
+ stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
+ stats->obytes = mac_stats[EFX_MAC_TX_OCTETS] -
+ mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
+ }
/*
* Take into account stats which are whenever supported
mac_stats[EFX_MAC_RX_JABBER_PKTS];
/* no oerrors counters supported on EF10 */
-		/* Exclude missed, errors and pauses from Rx packets */
-		sfc_update_diff_stat(&port->ipackets,
-			mac_stats[EFX_MAC_RX_PKTS] -
-			mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
-			stats->imissed - stats->ierrors);
-		stats->ipackets = port->ipackets;
+ if (!have_dp_rx_stats) {
+ /* Exclude missed, errors and pauses from Rx packets */
+ sfc_update_diff_stat(&port->ipackets,
+ mac_stats[EFX_MAC_RX_PKTS] -
+ mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
+ stats->imissed - stats->ierrors);
+ stats->ipackets = port->ipackets;
+ stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS] -
+ mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
+ }
}
unlock:
}
}
+/*
+ * Parse the "switch_mode" device argument and set sa->switchdev.
+ *
+ * "switchdev" selects switchdev mode, "legacy" selects legacy mode
+ * (comparison is case-insensitive).  When the argument is absent, the
+ * default is switchdev mode if the NIC reports MAE support and does not
+ * report the EVB datapath capability; legacy mode otherwise.
+ *
+ * Returns 0 on success, or an error code (EINVAL for an unrecognized
+ * mode string; otherwise whatever sfc_kvargs_process() returned).
+ */
+static int
+sfc_parse_switch_mode(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const char *switch_mode = NULL;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_SWITCH_MODE,
+ sfc_kvarg_string_handler, &switch_mode);
+ if (rc != 0)
+ goto fail_kvargs;
+
+ if (switch_mode == NULL) {
+ /* No devarg given: pick the default from NIC capabilities */
+ sa->switchdev = encp->enc_mae_supported &&
+ !encp->enc_datapath_cap_evb;
+ } else if (strcasecmp(switch_mode, SFC_KVARG_SWITCH_MODE_LEGACY) == 0) {
+ sa->switchdev = false;
+ } else if (strcasecmp(switch_mode,
+ SFC_KVARG_SWITCH_MODE_SWITCHDEV) == 0) {
+ sa->switchdev = true;
+ } else {
+ sfc_err(sa, "invalid switch mode device argument '%s'",
+ switch_mode);
+ rc = EINVAL;
+ goto fail_mode;
+ }
+
+ sfc_log_init(sa, "done");
+
+ return 0;
+
+fail_mode:
+fail_kvargs:
+ sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+
+ return rc;
+}
+
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
if (rc != 0)
goto fail_probe;
+ /*
+ * Selecting a default switch mode requires the NIC to be probed and
+ * to have its capabilities filled in.
+ */
+ rc = sfc_parse_switch_mode(sa);
+ if (rc != 0)
+ goto fail_switch_mode;
+
sfc_log_init(sa, "set device ops");
rc = sfc_eth_dev_set_ops(dev);
if (rc != 0)
if (rc != 0)
goto fail_attach;
+ /*
+ * Switchdev mode is usable only when MAE turned out to be
+ * supported during attach; bail out otherwise.
+ */
+ if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_SUPPORTED) {
+ sfc_err(sa,
+ "failed to enable switchdev mode without MAE support");
+ rc = ENOTSUP;
+ goto fail_switchdev_no_mae;
+ }
+
encp = efx_nic_cfg_get(sa->nic);
/*
sfc_log_init(sa, "done");
return 0;
+fail_switchdev_no_mae:
+ sfc_detach(sa);
+
fail_attach:
sfc_eth_dev_clear_ops(dev);
fail_set_ops:
+fail_switch_mode:
sfc_unprobe(sa);
fail_probe:
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
+ SFC_KVARG_SWITCH_MODE "=" SFC_KVARG_VALUES_SWITCH_MODE " "
SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "