+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+
+ return sap->dp_rx->intr_disable(rxq_info->dp);
+}
+
+/** Context carried through the MAE m-port journal read callbacks. */
+struct sfc_mport_journal_ctx {
+	/* Adapter on whose behalf the journal is being processed */
+	struct sfc_adapter		*sa;
+	/* Switch domain whose controller mapping is being built */
+	uint16_t			switch_domain_id;
+	/* Own MCDI client handle; entries matching it are skipped */
+	uint32_t			mcdi_handle;
+	/* Controller mapping already known; skip collecting controllers */
+	bool				controllers_assigned;
+	/* Sorted, duplicate-free array of discovered PCIe interfaces */
+	efx_pcie_interface_t		*controllers;
+	/* Number of valid entries in 'controllers' */
+	size_t				nb_controllers;
+};
+
+/**
+ * Insert a PCIe interface into the sorted, duplicate-free controller
+ * mapping held in the journal context.
+ *
+ * @return 0 on success (including a duplicate, which is a no-op);
+ *	   ENOMEM if (re)allocation fails, in which case the old array
+ *	   is released and the mapping in the context is reset.
+ */
+static int
+sfc_journal_ctx_add_controller(struct sfc_mport_journal_ctx *ctx,
+			       efx_pcie_interface_t intf)
+{
+	efx_pcie_interface_t *new_controllers;
+	size_t i, target;
+	size_t new_size;
+
+	/* First controller discovered: allocate a one-element array */
+	if (ctx->controllers == NULL) {
+		ctx->controllers = rte_malloc("sfc_controller_mapping",
+					      sizeof(ctx->controllers[0]), 0);
+		if (ctx->controllers == NULL)
+			return ENOMEM;
+
+		ctx->controllers[0] = intf;
+		ctx->nb_controllers = 1;
+
+		return 0;
+	}
+
+	/* Find the insertion point in the sorted array; skip duplicates */
+	for (i = 0; i < ctx->nb_controllers; i++) {
+		if (ctx->controllers[i] == intf)
+			return 0;
+		if (ctx->controllers[i] > intf)
+			break;
+	}
+	target = i;
+
+	ctx->nb_controllers += 1;
+	new_size = ctx->nb_controllers * sizeof(ctx->controllers[0]);
+
+	new_controllers = rte_realloc(ctx->controllers, new_size, 0);
+	if (new_controllers == NULL) {
+		rte_free(ctx->controllers);
+		/* Reset the mapping so the freed pointer cannot be reused */
+		ctx->controllers = NULL;
+		ctx->nb_controllers = 0;
+		return ENOMEM;
+	}
+	ctx->controllers = new_controllers;
+
+	/*
+	 * Shift the tail one slot to the right to open up 'target'.
+	 * Iterate backwards: a forward copy would propagate the old
+	 * controllers[target] value into every subsequent element
+	 * instead of shifting them.
+	 */
+	for (i = ctx->nb_controllers - 1; i > target; i--)
+		ctx->controllers[i] = ctx->controllers[i - 1];
+
+	ctx->controllers[target] = intf;
+
+	return 0;
+}
+
+/**
+ * Process one filtered m-port journal entry.
+ *
+ * While no controller mapping has been assigned for the switch domain,
+ * record the entry's PCIe interface in the journal context so the
+ * mapping can be established once the whole journal has been read.
+ *
+ * @return 0 on success, positive errno-style code on failure.
+ */
+static efx_rc_t
+sfc_process_mport_journal_entry(struct sfc_mport_journal_ctx *ctx,
+				efx_mport_desc_t *mport)
+{
+	efx_mport_sel_t ethdev_mport;
+	int rc;
+
+	sfc_dbg(ctx->sa,
+		"processing mport id %u (controller %u pf %u vf %u)",
+		mport->emd_id.id, mport->emd_vnic.ev_intf,
+		mport->emd_vnic.ev_pf, mport->emd_vnic.ev_vf);
+	/*
+	 * Fixed mangled token: the original text read "ðdev_mport",
+	 * an encoding artefact of "&ethdev_mport" ("&eth" -> U+00F0).
+	 */
+	efx_mae_mport_invalid(&ethdev_mport);
+
+	/* Collect controllers only until an explicit mapping is known */
+	if (!ctx->controllers_assigned) {
+		rc = sfc_journal_ctx_add_controller(ctx,
+						    mport->emd_vnic.ev_intf);
+		if (rc != 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * Callback invoked by efx_mae_read_mport_journal() for each entry.
+ *
+ * Validates the buffer, then skips entries that are zombies, are not
+ * VNIC m-ports, are not function clients, or belong to this driver
+ * instance itself; the rest are handed to
+ * sfc_process_mport_journal_entry().
+ */
+static efx_rc_t
+sfc_process_mport_journal_cb(void *data, efx_mport_desc_t *mport,
+			     size_t mport_len)
+{
+	struct sfc_mport_journal_ctx *ctx = data;
+
+	/*
+	 * Do not touch 'ctx' before it is known to be valid: the former
+	 * sfc_err(ctx->sa, ...) here dereferenced 'ctx' on the very path
+	 * that detects it being NULL.
+	 */
+	if (ctx == NULL || ctx->sa == NULL) {
+		SFC_GENERIC_LOG(ERR, "received NULL context or SFC adapter");
+		return EINVAL;
+	}
+
+	if (mport_len != sizeof(*mport)) {
+		sfc_err(ctx->sa, "actual and expected mport buffer sizes differ");
+		return EINVAL;
+	}
+
+	SFC_ASSERT(sfc_adapter_is_locked(ctx->sa));
+
+	/*
+	 * If a zombie flag is set, it means the mport has been marked for
+	 * deletion and cannot be used for any new operations. The mport will
+	 * be destroyed completely once all references to it are released.
+	 */
+	if (mport->emd_zombie) {
+		sfc_dbg(ctx->sa, "mport is a zombie, skipping");
+		return 0;
+	}
+	if (mport->emd_type != EFX_MPORT_TYPE_VNIC) {
+		sfc_dbg(ctx->sa, "mport is not a VNIC, skipping");
+		return 0;
+	}
+	if (mport->emd_vnic.ev_client_type != EFX_MPORT_VNIC_CLIENT_FUNCTION) {
+		sfc_dbg(ctx->sa, "mport is not a function, skipping");
+		return 0;
+	}
+	if (mport->emd_vnic.ev_handle == ctx->mcdi_handle) {
+		sfc_dbg(ctx->sa, "mport is this driver instance, skipping");
+		return 0;
+	}
+
+	return sfc_process_mport_journal_entry(ctx, mport);
+}
+
+/**
+ * Read and process the whole MAE m-port journal for the adapter.
+ *
+ * If the switch domain has no controller mapping yet, the mapping
+ * collected while reading the journal is installed afterwards.
+ *
+ * @return 0 on success, positive errno-style code on failure.
+ */
+static int
+sfc_process_mport_journal(struct sfc_adapter *sa)
+{
+	const efx_pcie_interface_t *mapped_controllers;
+	size_t nb_mapped_controllers;
+	struct sfc_mport_journal_ctx ctx = {
+		.sa = sa,
+		.switch_domain_id = sa->mae.switch_domain_id,
+	};
+	efx_rc_t efx_rc;
+	int rc;
+
+	efx_rc = efx_mcdi_get_own_client_handle(sa->nic, &ctx.mcdi_handle);
+	if (efx_rc != 0) {
+		sfc_err(sa, "failed to get own MCDI handle");
+		SFC_ASSERT(efx_rc > 0);
+		return efx_rc;
+	}
+
+	rc = sfc_mae_switch_domain_controllers(ctx.switch_domain_id,
+					       &mapped_controllers,
+					       &nb_mapped_controllers);
+	if (rc != 0) {
+		sfc_err(sa, "failed to get controller mapping");
+		return rc;
+	}
+
+	/* Collect controllers from the journal only if none are mapped yet */
+	ctx.controllers_assigned = mapped_controllers != NULL;
+	ctx.controllers = NULL;
+	ctx.nb_controllers = 0;
+
+	efx_rc = efx_mae_read_mport_journal(sa->nic,
+					    sfc_process_mport_journal_cb,
+					    &ctx);
+	if (efx_rc != 0) {
+		sfc_err(sa, "failed to process MAE mport journal");
+		SFC_ASSERT(efx_rc > 0);
+		return efx_rc;
+	}
+
+	if (mapped_controllers == NULL) {
+		rc = sfc_mae_switch_domain_map_controllers(ctx.switch_domain_id,
+							   ctx.controllers,
+							   ctx.nb_controllers);
+		if (rc != 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+/* Ethdev callbacks installed in the primary process (see sfc_eth_dev_set_ops()) */
+static const struct eth_dev_ops sfc_eth_dev_ops = {
+	.dev_configure			= sfc_dev_configure,
+	.dev_start			= sfc_dev_start,
+	.dev_stop			= sfc_dev_stop,
+	.dev_set_link_up		= sfc_dev_set_link_up,
+	.dev_set_link_down		= sfc_dev_set_link_down,
+	.dev_close			= sfc_dev_close,
+	.promiscuous_enable		= sfc_dev_promisc_enable,
+	.promiscuous_disable		= sfc_dev_promisc_disable,
+	.allmulticast_enable		= sfc_dev_allmulti_enable,
+	.allmulticast_disable		= sfc_dev_allmulti_disable,
+	.link_update			= sfc_dev_link_update,
+	.stats_get			= sfc_stats_get,
+	.stats_reset			= sfc_stats_reset,
+	.xstats_get			= sfc_xstats_get,
+	/* Resetting basic stats resets extended stats too */
+	.xstats_reset			= sfc_stats_reset,
+	.xstats_get_names		= sfc_xstats_get_names,
+	.dev_infos_get			= sfc_dev_infos_get,
+	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
+	.mtu_set			= sfc_dev_set_mtu,
+	.rx_queue_start			= sfc_rx_queue_start,
+	.rx_queue_stop			= sfc_rx_queue_stop,
+	.tx_queue_start			= sfc_tx_queue_start,
+	.tx_queue_stop			= sfc_tx_queue_stop,
+	.rx_queue_setup			= sfc_rx_queue_setup,
+	.rx_queue_release		= sfc_rx_queue_release,
+	.rx_queue_intr_enable		= sfc_rx_queue_intr_enable,
+	.rx_queue_intr_disable		= sfc_rx_queue_intr_disable,
+	.tx_queue_setup			= sfc_tx_queue_setup,
+	.tx_queue_release		= sfc_tx_queue_release,
+	.flow_ctrl_get			= sfc_flow_ctrl_get,
+	.flow_ctrl_set			= sfc_flow_ctrl_set,
+	.mac_addr_set			= sfc_mac_addr_set,
+	.udp_tunnel_port_add		= sfc_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del		= sfc_dev_udp_tunnel_port_del,
+	.reta_update			= sfc_dev_rss_reta_update,
+	.reta_query			= sfc_dev_rss_reta_query,
+	.rss_hash_update		= sfc_dev_rss_hash_update,
+	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
+	.flow_ops_get			= sfc_dev_flow_ops_get,
+	.set_mc_addr_list		= sfc_set_mc_addr_list,
+	.rxq_info_get			= sfc_rx_queue_info_get,
+	.txq_info_get			= sfc_tx_queue_info_get,
+	.fw_version_get			= sfc_fw_version_get,
+	.xstats_get_by_id		= sfc_xstats_get_by_id,
+	.xstats_get_names_by_id		= sfc_xstats_get_names_by_id,
+	.pool_ops_supported		= sfc_pool_ops_supported,
+};
+
+/** Parameters passed to sfc_eth_dev_init() through its init_params argument. */
+struct sfc_ethdev_init_data {
+	/*
+	 * Number of port representors requested; a non-zero value
+	 * influences default switch mode selection (see
+	 * sfc_parse_switch_mode()).
+	 */
+	uint16_t		nb_representors;
+};
+
+/**
+ * Duplicate a string in potentially shared memory required for
+ * multi-process support.
+ *
+ * strdup() allocates from process-local heap/memory.
+ */
+static char *
+sfc_strdup(const char *str)
+{
+ size_t size;
+ char *copy;
+
+ if (str == NULL)
+ return NULL;
+
+ size = strlen(str) + 1;
+ copy = rte_malloc(__func__, size, 0);
+ if (copy != NULL)
+ rte_memcpy(copy, str, size);
+
+ return copy;
+}
+
+/**
+ * Select Rx and Tx datapath implementations and install the ethdev
+ * callbacks for the primary process.
+ *
+ * A datapath is chosen by name if the corresponding device argument
+ * (SFC_KVARG_RX_DATAPATH / SFC_KVARG_TX_DATAPATH) is given, otherwise
+ * by best match against the adapter's HW/FW capabilities. The chosen
+ * names are duplicated into shared memory so secondary processes can
+ * look the datapaths up (see sfc_eth_dev_secondary_init()).
+ *
+ * @return 0 on success, positive errno-style code on failure.
+ */
+static int
+sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	const struct sfc_dp_rx *dp_rx;
+	const struct sfc_dp_tx *dp_tx;
+	const efx_nic_cfg_t *encp;
+	unsigned int avail_caps = 0;
+	const char *rx_name = NULL;
+	const char *tx_name = NULL;
+	int rc;
+
+	/* Derive available HW/FW capability flags from the NIC family */
+	switch (sa->family) {
+	case EFX_FAMILY_HUNTINGTON:
+	case EFX_FAMILY_MEDFORD:
+	case EFX_FAMILY_MEDFORD2:
+		avail_caps |= SFC_DP_HW_FW_CAP_EF10;
+		avail_caps |= SFC_DP_HW_FW_CAP_RX_EFX;
+		avail_caps |= SFC_DP_HW_FW_CAP_TX_EFX;
+		break;
+	case EFX_FAMILY_RIVERHEAD:
+		avail_caps |= SFC_DP_HW_FW_CAP_EF100;
+		break;
+	default:
+		break;
+	}
+
+	encp = efx_nic_cfg_get(sa->nic);
+	if (encp->enc_rx_es_super_buffer_supported)
+		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
+
+	/* Honour an explicit Rx datapath choice from device arguments */
+	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
+				sfc_kvarg_string_handler, &rx_name);
+	if (rc != 0)
+		goto fail_kvarg_rx_datapath;
+
+	if (rx_name != NULL) {
+		dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
+		if (dp_rx == NULL) {
+			sfc_err(sa, "Rx datapath %s not found", rx_name);
+			rc = ENOENT;
+			goto fail_dp_rx;
+		}
+		if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
+			sfc_err(sa,
+				"Insufficient Hw/FW capabilities to use Rx datapath %s",
+				rx_name);
+			rc = EINVAL;
+			goto fail_dp_rx_caps;
+		}
+	} else {
+		/* No explicit choice: pick the best capability match */
+		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
+		if (dp_rx == NULL) {
+			sfc_err(sa, "Rx datapath by caps %#x not found",
+				avail_caps);
+			rc = ENOENT;
+			goto fail_dp_rx;
+		}
+	}
+
+	/* Store the name in shared memory for secondary processes */
+	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
+	if (sas->dp_rx_name == NULL) {
+		rc = ENOMEM;
+		goto fail_dp_rx_name;
+	}
+
+	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);
+
+	/* Honour an explicit Tx datapath choice from device arguments */
+	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
+				sfc_kvarg_string_handler, &tx_name);
+	if (rc != 0)
+		goto fail_kvarg_tx_datapath;
+
+	if (tx_name != NULL) {
+		dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
+		if (dp_tx == NULL) {
+			sfc_err(sa, "Tx datapath %s not found", tx_name);
+			rc = ENOENT;
+			goto fail_dp_tx;
+		}
+		if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
+			sfc_err(sa,
+				"Insufficient Hw/FW capabilities to use Tx datapath %s",
+				tx_name);
+			rc = EINVAL;
+			goto fail_dp_tx_caps;
+		}
+	} else {
+		dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
+		if (dp_tx == NULL) {
+			sfc_err(sa, "Tx datapath by caps %#x not found",
+				avail_caps);
+			rc = ENOENT;
+			goto fail_dp_tx;
+		}
+	}
+
+	sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
+	if (sas->dp_tx_name == NULL) {
+		rc = ENOMEM;
+		goto fail_dp_tx_name;
+	}
+
+	sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);
+
+	sa->priv.dp_rx = dp_rx;
+	sa->priv.dp_tx = dp_tx;
+
+	/* Install burst callbacks and the full primary-process op table */
+	dev->rx_pkt_burst = dp_rx->pkt_burst;
+	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
+	dev->tx_pkt_burst = dp_tx->pkt_burst;
+
+	dev->rx_queue_count = sfc_rx_queue_count;
+	dev->rx_descriptor_status = sfc_rx_descriptor_status;
+	dev->tx_descriptor_status = sfc_tx_descriptor_status;
+	dev->dev_ops = &sfc_eth_dev_ops;
+
+	return 0;
+
+	/* Unwind in reverse order of acquisition */
+fail_dp_tx_name:
+fail_dp_tx_caps:
+fail_dp_tx:
+fail_kvarg_tx_datapath:
+	rte_free(sas->dp_rx_name);
+	sas->dp_rx_name = NULL;
+
+fail_dp_rx_name:
+fail_dp_rx_caps:
+fail_dp_rx:
+fail_kvarg_rx_datapath:
+	return rc;
+}
+
+/*
+ * Undo sfc_eth_dev_set_ops(): detach the datapaths from the ethdev
+ * and release the datapath names held in shared memory.
+ */
+static void
+sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
+{
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+
+	dev->dev_ops = NULL;
+	dev->tx_pkt_prepare = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+
+	sa->priv.dp_tx = NULL;
+	rte_free(sas->dp_tx_name);
+	sas->dp_tx_name = NULL;
+
+	sa->priv.dp_rx = NULL;
+	rte_free(sas->dp_rx_name);
+	sas->dp_rx_name = NULL;
+}
+
+/* Reduced ethdev op table for secondary processes (read-only queries) */
+static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
+	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
+	.reta_query			= sfc_dev_rss_reta_query,
+	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
+	.rxq_info_get			= sfc_rx_queue_info_get,
+	.txq_info_get			= sfc_tx_queue_info_get,
+};
+
+/**
+ * Initialise the ethdev in a secondary process.
+ *
+ * Looks up the Rx/Tx datapaths by the names the primary process stored
+ * in shared memory, verifies they support multi-process operation, and
+ * installs process-private data and the reduced secondary op table.
+ *
+ * @return 0 on success, positive errno-style code on failure
+ *	   (the caller negates it for the ethdev layer).
+ */
+static int
+sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
+{
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_adapter_priv *sap;
+	const struct sfc_dp_rx *dp_rx;
+	const struct sfc_dp_tx *dp_tx;
+	int rc;
+
+	/*
+	 * Allocate process private data from heap, since it should not
+	 * be located in shared memory allocated using rte_malloc() API.
+	 */
+	sap = calloc(1, sizeof(*sap));
+	if (sap == NULL) {
+		rc = ENOMEM;
+		goto fail_alloc_priv;
+	}
+
+	sap->logtype_main = logtype_main;
+
+	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
+	if (dp_rx == NULL) {
+		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
+			"cannot find %s Rx datapath", sas->dp_rx_name);
+		rc = ENOENT;
+		goto fail_dp_rx;
+	}
+	/* ~features & FLAG is non-zero iff the flag is NOT set */
+	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
+		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
+			"%s Rx datapath does not support multi-process",
+			sas->dp_rx_name);
+		rc = EINVAL;
+		goto fail_dp_rx_multi_process;
+	}
+
+	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
+	if (dp_tx == NULL) {
+		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
+			"cannot find %s Tx datapath", sas->dp_tx_name);
+		rc = ENOENT;
+		goto fail_dp_tx;
+	}
+	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
+		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
+			"%s Tx datapath does not support multi-process",
+			sas->dp_tx_name);
+		rc = EINVAL;
+		goto fail_dp_tx_multi_process;
+	}
+
+	sap->dp_rx = dp_rx;
+	sap->dp_tx = dp_tx;
+
+	dev->process_private = sap;
+	dev->rx_pkt_burst = dp_rx->pkt_burst;
+	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
+	dev->tx_pkt_burst = dp_tx->pkt_burst;
+	dev->rx_queue_count = sfc_rx_queue_count;
+	dev->rx_descriptor_status = sfc_rx_descriptor_status;
+	dev->tx_descriptor_status = sfc_tx_descriptor_status;
+	dev->dev_ops = &sfc_eth_dev_secondary_ops;
+
+	return 0;
+
+fail_dp_tx_multi_process:
+fail_dp_tx:
+fail_dp_rx_multi_process:
+fail_dp_rx:
+	free(sap);
+
+fail_alloc_priv:
+	return rc;
+}
+
+/*
+ * Register all datapath implementations on the global list.
+ * NOTE(review): registration order appears to define lookup preference
+ * in sfc_dp_find_*_by_caps() — confirm before reordering entries.
+ */
+static void
+sfc_register_dp(void)
+{
+	/* Register once */
+	if (TAILQ_EMPTY(&sfc_dp_head)) {
+		/* Prefer EF10 datapath */
+		sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);
+		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
+		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
+		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+
+		sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp);
+		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
+		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
+		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
+	}
+}
+
+/**
+ * Determine whether the adapter runs in switchdev or legacy mode.
+ *
+ * The mode is taken from the SFC_KVARG_SWITCH_MODE device argument if
+ * present; otherwise a default is derived from MAE support, the EVB
+ * datapath capability and whether representors were requested.
+ * The result is stored in sa->switchdev.
+ *
+ * @return 0 on success, positive errno-style code on failure.
+ */
+static int
+sfc_parse_switch_mode(struct sfc_adapter *sa, bool has_representors)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	const char *switch_mode = NULL;
+	int rc;
+
+	sfc_log_init(sa, "entry");
+
+	rc = sfc_kvargs_process(sa, SFC_KVARG_SWITCH_MODE,
+				sfc_kvarg_string_handler, &switch_mode);
+	if (rc != 0)
+		goto fail_kvargs;
+
+	if (switch_mode == NULL) {
+		/* No explicit mode: default based on NIC capabilities */
+		sa->switchdev = encp->enc_mae_supported &&
+				(!encp->enc_datapath_cap_evb ||
+				 has_representors);
+	} else if (strcasecmp(switch_mode, SFC_KVARG_SWITCH_MODE_LEGACY) == 0) {
+		sa->switchdev = false;
+	} else if (strcasecmp(switch_mode,
+			      SFC_KVARG_SWITCH_MODE_SWITCHDEV) == 0) {
+		sa->switchdev = true;
+	} else {
+		sfc_err(sa, "invalid switch mode device argument '%s'",
+			switch_mode);
+		rc = EINVAL;
+		goto fail_mode;
+	}
+
+	sfc_log_init(sa, "done");
+
+	return 0;
+
+fail_mode:
+fail_kvargs:
+	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+
+	return rc;
+}
+
+/**
+ * Ethdev init entry point for the sfc driver.
+ *
+ * Dispatches to secondary-process init when not the primary process;
+ * otherwise allocates process-private adapter state, parses device
+ * arguments, probes the NIC, selects switch mode and datapaths, and
+ * attaches the adapter.
+ *
+ * @return 0 on success, 1 to skip probing (wrong device class),
+ *	   negative errno-style code on failure.
+ */
+static int
+sfc_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
+{
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct sfc_ethdev_init_data *init_data = init_params;
+	uint32_t logtype_main;
+	struct sfc_adapter *sa;
+	int rc;
+	const efx_nic_cfg_t *encp;
+	const struct rte_ether_addr *from;
+	int ret;
+
+	if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
+			SFC_EFX_DEV_CLASS_NET) {
+		SFC_GENERIC_LOG(DEBUG,
+			"Incompatible device class: skip probing, should be probed by other sfc driver.");
+		/* Positive return tells the ethdev layer to skip this device */
+		return 1;
+	}
+
+	/*
+	 * NOTE(review): 'rc' is returned as-is here, while every other
+	 * failure path returns -rc — confirm sfc_dp_mport_register()
+	 * returns a negative value or that callers accept positive errno.
+	 */
+	rc = sfc_dp_mport_register();
+	if (rc != 0)
+		return rc;
+
+	sfc_register_dp();
+
+	logtype_main = sfc_register_logtype(&pci_dev->addr,
+					    SFC_LOGTYPE_MAIN_STR,
+					    RTE_LOG_NOTICE);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -sfc_eth_dev_secondary_init(dev, logtype_main);
+
+	/* Required for logging */
+	ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix),
+		       "PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ",
+		       pci_dev->addr.domain, pci_dev->addr.bus,
+		       pci_dev->addr.devid, pci_dev->addr.function,
+		       dev->data->port_id);
+	if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) {
+		SFC_GENERIC_LOG(ERR,
+			"reserved log prefix is too short for " PCI_PRI_FMT,
+			pci_dev->addr.domain, pci_dev->addr.bus,
+			pci_dev->addr.devid, pci_dev->addr.function);
+		return -EINVAL;
+	}
+	sas->pci_addr = pci_dev->addr;
+	sas->port_id = dev->data->port_id;
+
+	/*
+	 * Allocate process private data from heap, since it should not
+	 * be located in shared memory allocated using rte_malloc() API.
+	 */
+	sa = calloc(1, sizeof(*sa));
+	if (sa == NULL) {
+		rc = ENOMEM;
+		goto fail_alloc_sa;
+	}
+
+	dev->process_private = sa;
+
+	/* Required for logging */
+	sa->priv.shared = sas;
+	sa->priv.logtype_main = logtype_main;
+
+	sa->eth_dev = dev;
+
+	/* Copy PCI device info to the dev->data */
+	rte_eth_copy_pci_info(dev, pci_dev);
+	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+
+	rc = sfc_kvargs_parse(sa);
+	if (rc != 0)
+		goto fail_kvargs_parse;
+
+	sfc_log_init(sa, "entry");
+
+	dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
+	if (dev->data->mac_addrs == NULL) {
+		rc = ENOMEM;
+		goto fail_mac_addrs;
+	}
+
+	sfc_adapter_lock_init(sa);
+	sfc_adapter_lock(sa);
+
+	sfc_log_init(sa, "probing");
+	rc = sfc_probe(sa);
+	if (rc != 0)
+		goto fail_probe;
+
+	/*
+	 * Selecting a default switch mode requires the NIC to be probed and
+	 * to have its capabilities filled in.
+	 */
+	rc = sfc_parse_switch_mode(sa, init_data->nb_representors > 0);
+	if (rc != 0)
+		goto fail_switch_mode;
+
+	sfc_log_init(sa, "set device ops");
+	rc = sfc_eth_dev_set_ops(dev);
+	if (rc != 0)
+		goto fail_set_ops;
+
+	sfc_log_init(sa, "attaching");
+	rc = sfc_attach(sa);
+	if (rc != 0)
+		goto fail_attach;
+
+	/* Switchdev mode is only usable when the MAE is available */
+	if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_SUPPORTED) {
+		sfc_err(sa,
+			"failed to enable switchdev mode without MAE support");
+		rc = ENOTSUP;
+		goto fail_switchdev_no_mae;
+	}
+
+	encp = efx_nic_cfg_get(sa->nic);
+
+	/*
+	 * The arguments are really reverse order in comparison to
+	 * Linux kernel. Copy from NIC config to Ethernet device data.
+	 */
+	from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
+	rte_ether_addr_copy(from, &dev->data->mac_addrs[0]);
+
+	sfc_adapter_unlock(sa);
+
+	sfc_log_init(sa, "done");
+	return 0;
+
+	/* Unwind in reverse order of acquisition */
+fail_switchdev_no_mae:
+	sfc_detach(sa);
+
+fail_attach:
+	sfc_eth_dev_clear_ops(dev);
+
+fail_set_ops:
+fail_switch_mode:
+	sfc_unprobe(sa);
+
+fail_probe:
+	sfc_adapter_unlock(sa);
+	sfc_adapter_lock_fini(sa);
+	rte_free(dev->data->mac_addrs);
+	dev->data->mac_addrs = NULL;
+
+fail_mac_addrs:
+	sfc_kvargs_cleanup(sa);
+
+fail_kvargs_parse:
+	sfc_log_init(sa, "failed %d", rc);
+	dev->process_private = NULL;
+	free(sa);
+
+fail_alloc_sa:
+	SFC_ASSERT(rc > 0);
+	return -rc;
+}
+
+static int
+sfc_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+ sfc_dev_close(dev);