X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_ethdev.c;h=f5986b610fff2f3dc412eb8dd36996d5cb48f7bd;hb=2f577f0ea1a3;hp=2e34647e87878f99041d0579e1257bfe1b07a90d;hpb=c377f1adf7093c6fab17833b289138d67db27563;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 2e34647e87..f5986b610f 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -26,10 +26,12 @@
 #include "sfc_rx.h"
 #include "sfc_tx.h"
 #include "sfc_flow.h"
+#include "sfc_flow_tunnel.h"
 #include "sfc_dp.h"
 #include "sfc_dp_rx.h"
 #include "sfc_repr.h"
 #include "sfc_sw_stats.h"
+#include "sfc_switch.h"
 
 #define SFC_XSTAT_ID_INVALID_VAL	UINT64_MAX
 #define SFC_XSTAT_ID_INVALID_NAME	'\0'
@@ -185,7 +187,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
-	if (mae->status == SFC_MAE_STATUS_SUPPORTED) {
+	if (mae->status == SFC_MAE_STATUS_SUPPORTED ||
+	    mae->status == SFC_MAE_STATUS_ADMIN) {
 		dev_info->switch_info.name = dev->device->driver->name;
 		dev_info->switch_info.domain_id = mae->switch_domain_id;
 		dev_info->switch_info.port_id = mae->switch_port_id;
@@ -1138,17 +1141,6 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		}
 	}
 
-	/*
-	 * The driver does not use it, but other PMDs update jumbo frame
-	 * flag and max_rx_pkt_len when MTU is set.
-	 */
-	if (mtu > RTE_ETHER_MTU) {
-		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-	}
-
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
-
 	sfc_adapter_unlock(sa);
 
 	sfc_log_init(sa, "done");
@@ -1355,19 +1347,19 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
  * use any process-local pointers from the adapter data.
  */
 static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
+sfc_rx_queue_count(void *rx_queue)
 {
-	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
-	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
-	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+	struct sfc_dp_rxq *dp_rxq = rx_queue;
+	const struct sfc_dp_rx *dp_rx;
 	struct sfc_rxq_info *rxq_info;
 
-	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
+	rxq_info = sfc_rxq_info_by_dp_rxq(dp_rxq);
 
 	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
 		return 0;
 
-	return sap->dp_rx->qdesc_npending(rxq_info->dp);
+	return dp_rx->qdesc_npending(dp_rxq);
 }
 
 /*
@@ -1918,6 +1910,430 @@ sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 	return sap->dp_rx->intr_disable(rxq_info->dp);
 }
 
+struct sfc_mport_journal_ctx {
+	struct sfc_adapter *sa;
+	uint16_t switch_domain_id;
+	uint32_t mcdi_handle;
+	bool controllers_assigned;
+	efx_pcie_interface_t *controllers;
+	size_t nb_controllers;
+};
+
+static int
+sfc_journal_ctx_add_controller(struct sfc_mport_journal_ctx *ctx,
+			       efx_pcie_interface_t intf)
+{
+	efx_pcie_interface_t *new_controllers;
+	size_t i, target;
+	size_t new_size;
+
+	if (ctx->controllers == NULL) {
+		ctx->controllers = rte_malloc("sfc_controller_mapping",
+					      sizeof(ctx->controllers[0]), 0);
+		if (ctx->controllers == NULL)
+			return ENOMEM;
+
+		ctx->controllers[0] = intf;
+		ctx->nb_controllers = 1;
+
+		return 0;
+	}
+
+	for (i = 0; i < ctx->nb_controllers; i++) {
+		if (ctx->controllers[i] == intf)
+			return 0;
+		if (ctx->controllers[i] > intf)
+			break;
+	}
+	target = i;
+
+	ctx->nb_controllers += 1;
+	new_size = ctx->nb_controllers * sizeof(ctx->controllers[0]);
+
+	new_controllers = rte_realloc(ctx->controllers, new_size, 0);
+	if (new_controllers == NULL) {
+		rte_free(ctx->controllers);
+		return ENOMEM;
+	}
+	ctx->controllers = new_controllers;
+
+	for (i = target + 1; i < ctx->nb_controllers; i++)
+		ctx->controllers[i] = ctx->controllers[i - 1];
+
+	ctx->controllers[target] = intf;
+
+	return 0;
+}
+
+static efx_rc_t
+sfc_process_mport_journal_entry(struct sfc_mport_journal_ctx *ctx,
+				efx_mport_desc_t *mport)
+{
+	struct sfc_mae_switch_port_request req;
+	efx_mport_sel_t entity_selector;
+	efx_mport_sel_t ethdev_mport;
+	uint16_t switch_port_id;
+	efx_rc_t efx_rc;
+	int rc;
+
+	sfc_dbg(ctx->sa,
+		"processing mport id %u (controller %u pf %u vf %u)",
+		mport->emd_id.id, mport->emd_vnic.ev_intf,
+		mport->emd_vnic.ev_pf, mport->emd_vnic.ev_vf);
+	efx_mae_mport_invalid(&ethdev_mport);
+
+	if (!ctx->controllers_assigned) {
+		rc = sfc_journal_ctx_add_controller(ctx,
+						    mport->emd_vnic.ev_intf);
+		if (rc != 0)
+			return rc;
+	}
+
+	/* Build Mport selector */
+	efx_rc = efx_mae_mport_by_pcie_mh_function(mport->emd_vnic.ev_intf,
+						   mport->emd_vnic.ev_pf,
+						   mport->emd_vnic.ev_vf,
+						   &entity_selector);
+	if (efx_rc != 0) {
+		sfc_err(ctx->sa, "failed to build entity mport selector for c%upf%uvf%u",
+			mport->emd_vnic.ev_intf,
+			mport->emd_vnic.ev_pf,
+			mport->emd_vnic.ev_vf);
+		return efx_rc;
+	}
+
+	rc = sfc_mae_switch_port_id_by_entity(ctx->switch_domain_id,
+					      &entity_selector,
+					      SFC_MAE_SWITCH_PORT_REPRESENTOR,
+					      &switch_port_id);
+	switch (rc) {
+	case 0:
+		/* Already registered */
+		break;
+	case ENOENT:
+		/*
+		 * No representor has been created for this entity.
+		 * Create a dummy switch registry entry with an invalid ethdev
+		 * mport selector. When a corresponding representor is created,
+		 * this entry will be updated.
+		 */
+		req.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
+		req.entity_mportp = &entity_selector;
+		req.ethdev_mportp = &ethdev_mport;
+		req.ethdev_port_id = RTE_MAX_ETHPORTS;
+		req.port_data.repr.intf = mport->emd_vnic.ev_intf;
+		req.port_data.repr.pf = mport->emd_vnic.ev_pf;
+		req.port_data.repr.vf = mport->emd_vnic.ev_vf;
+
+		rc = sfc_mae_assign_switch_port(ctx->switch_domain_id,
+						&req, &switch_port_id);
+		if (rc != 0) {
+			sfc_err(ctx->sa,
+				"failed to assign MAE switch port for c%upf%uvf%u: %s",
+				mport->emd_vnic.ev_intf,
+				mport->emd_vnic.ev_pf,
+				mport->emd_vnic.ev_vf,
+				rte_strerror(rc));
+			return rc;
+		}
+		break;
+	default:
+		sfc_err(ctx->sa, "failed to find MAE switch port for c%upf%uvf%u: %s",
+			mport->emd_vnic.ev_intf,
+			mport->emd_vnic.ev_pf,
+			mport->emd_vnic.ev_vf,
+			rte_strerror(rc));
+		return rc;
+	}
+
+	return 0;
+}
+
+static efx_rc_t
+sfc_process_mport_journal_cb(void *data, efx_mport_desc_t *mport,
+			     size_t mport_len)
+{
+	struct sfc_mport_journal_ctx *ctx = data;
+
+	if (ctx == NULL || ctx->sa == NULL) {
+		sfc_err(ctx->sa, "received NULL context or SFC adapter");
+		return EINVAL;
+	}
+
+	if (mport_len != sizeof(*mport)) {
+		sfc_err(ctx->sa, "actual and expected mport buffer sizes differ");
+		return EINVAL;
+	}
+
+	SFC_ASSERT(sfc_adapter_is_locked(ctx->sa));
+
+	/*
+	 * If a zombie flag is set, it means the mport has been marked for
+	 * deletion and cannot be used for any new operations. The mport will
+	 * be destroyed completely once all references to it are released.
+	 */
+	if (mport->emd_zombie) {
+		sfc_dbg(ctx->sa, "mport is a zombie, skipping");
+		return 0;
+	}
+	if (mport->emd_type != EFX_MPORT_TYPE_VNIC) {
+		sfc_dbg(ctx->sa, "mport is not a VNIC, skipping");
+		return 0;
+	}
+	if (mport->emd_vnic.ev_client_type != EFX_MPORT_VNIC_CLIENT_FUNCTION) {
+		sfc_dbg(ctx->sa, "mport is not a function, skipping");
+		return 0;
+	}
+	if (mport->emd_vnic.ev_handle == ctx->mcdi_handle) {
+		sfc_dbg(ctx->sa, "mport is this driver instance, skipping");
+		return 0;
+	}
+
+	return sfc_process_mport_journal_entry(ctx, mport);
+}
+
+static int
+sfc_process_mport_journal(struct sfc_adapter *sa)
+{
+	struct sfc_mport_journal_ctx ctx;
+	const efx_pcie_interface_t *controllers;
+	size_t nb_controllers;
+	efx_rc_t efx_rc;
+	int rc;
+
+	memset(&ctx, 0, sizeof(ctx));
+	ctx.sa = sa;
+	ctx.switch_domain_id = sa->mae.switch_domain_id;
+
+	efx_rc = efx_mcdi_get_own_client_handle(sa->nic, &ctx.mcdi_handle);
+	if (efx_rc != 0) {
+		sfc_err(sa, "failed to get own MCDI handle");
+		SFC_ASSERT(efx_rc > 0);
+		return efx_rc;
+	}
+
+	rc = sfc_mae_switch_domain_controllers(ctx.switch_domain_id,
+					       &controllers, &nb_controllers);
+	if (rc != 0) {
+		sfc_err(sa, "failed to get controller mapping");
+		return rc;
+	}
+
+	ctx.controllers_assigned = controllers != NULL;
+	ctx.controllers = NULL;
+	ctx.nb_controllers = 0;
+
+	efx_rc = efx_mae_read_mport_journal(sa->nic,
+					    sfc_process_mport_journal_cb, &ctx);
+	if (efx_rc != 0) {
+		sfc_err(sa, "failed to process MAE mport journal");
+		SFC_ASSERT(efx_rc > 0);
+		return efx_rc;
+	}
+
+	if (controllers == NULL) {
+		rc = sfc_mae_switch_domain_map_controllers(ctx.switch_domain_id,
+							   ctx.controllers,
+							   ctx.nb_controllers);
+		if (rc != 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+static void
+sfc_count_representors_cb(enum sfc_mae_switch_port_type type,
+			  const efx_mport_sel_t *ethdev_mportp __rte_unused,
+			  uint16_t ethdev_port_id __rte_unused,
+			  const efx_mport_sel_t *entity_mportp __rte_unused,
+			  uint16_t switch_port_id __rte_unused,
+			  union sfc_mae_switch_port_data *port_datap
+			  __rte_unused,
+			  void *user_datap)
+{
+	int *counter = user_datap;
+
+	SFC_ASSERT(counter != NULL);
+
+	if (type == SFC_MAE_SWITCH_PORT_REPRESENTOR)
+		(*counter)++;
+}
+
+struct sfc_get_representors_ctx {
+	struct rte_eth_representor_info *info;
+	struct sfc_adapter *sa;
+	uint16_t switch_domain_id;
+	const efx_pcie_interface_t *controllers;
+	size_t nb_controllers;
+};
+
+static void
+sfc_get_representors_cb(enum sfc_mae_switch_port_type type,
+			const efx_mport_sel_t *ethdev_mportp __rte_unused,
+			uint16_t ethdev_port_id __rte_unused,
+			const efx_mport_sel_t *entity_mportp __rte_unused,
+			uint16_t switch_port_id,
+			union sfc_mae_switch_port_data *port_datap,
+			void *user_datap)
+{
+	struct sfc_get_representors_ctx *ctx = user_datap;
+	struct rte_eth_representor_range *range;
+	int ret;
+	int rc;
+
+	SFC_ASSERT(ctx != NULL);
+	SFC_ASSERT(ctx->info != NULL);
+	SFC_ASSERT(ctx->sa != NULL);
+
+	if (type != SFC_MAE_SWITCH_PORT_REPRESENTOR) {
+		sfc_dbg(ctx->sa, "not a representor, skipping");
+		return;
+	}
+	if (ctx->info->nb_ranges >= ctx->info->nb_ranges_alloc) {
+		sfc_dbg(ctx->sa, "info structure is full already");
+		return;
+	}
+
+	range = &ctx->info->ranges[ctx->info->nb_ranges];
+	rc = sfc_mae_switch_controller_from_mapping(ctx->controllers,
+						    ctx->nb_controllers,
+						    port_datap->repr.intf,
+						    &range->controller);
+	if (rc != 0) {
+		sfc_err(ctx->sa, "invalid representor controller: %d",
+			port_datap->repr.intf);
+		range->controller = -1;
+	}
+	range->pf = port_datap->repr.pf;
+	range->id_base = switch_port_id;
+	range->id_end = switch_port_id;
+
+	if (port_datap->repr.vf != EFX_PCI_VF_INVALID) {
+		range->type = RTE_ETH_REPRESENTOR_VF;
+		range->vf = port_datap->repr.vf;
+		ret = snprintf(range->name, RTE_DEV_NAME_MAX_LEN,
+			       "c%dpf%dvf%d", range->controller, range->pf,
+			       range->vf);
+	} else {
+		range->type = RTE_ETH_REPRESENTOR_PF;
+		ret = snprintf(range->name, RTE_DEV_NAME_MAX_LEN,
+			       "c%dpf%d", range->controller, range->pf);
+	}
+	if (ret >= RTE_DEV_NAME_MAX_LEN) {
+		sfc_err(ctx->sa, "representor name has been truncated: %s",
+			range->name);
+	}
+
+	ctx->info->nb_ranges++;
+}
+
+static int
+sfc_representor_info_get(struct rte_eth_dev *dev,
+			 struct rte_eth_representor_info *info)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_get_representors_ctx get_repr_ctx;
+	const efx_nic_cfg_t *nic_cfg;
+	uint16_t switch_domain_id;
+	uint32_t nb_repr;
+	int controller;
+	int rc;
+
+	sfc_adapter_lock(sa);
+
+	if (sa->mae.status != SFC_MAE_STATUS_ADMIN) {
+		sfc_adapter_unlock(sa);
+		return -ENOTSUP;
+	}
+
+	rc = sfc_process_mport_journal(sa);
+	if (rc != 0) {
+		sfc_adapter_unlock(sa);
+		SFC_ASSERT(rc > 0);
+		return -rc;
+	}
+
+	switch_domain_id = sa->mae.switch_domain_id;
+
+	nb_repr = 0;
+	rc = sfc_mae_switch_ports_iterate(switch_domain_id,
+					  sfc_count_representors_cb,
+					  &nb_repr);
+	if (rc != 0) {
+		sfc_adapter_unlock(sa);
+		SFC_ASSERT(rc > 0);
+		return -rc;
+	}
+
+	if (info == NULL) {
+		sfc_adapter_unlock(sa);
+		return nb_repr;
+	}
+
+	rc = sfc_mae_switch_domain_controllers(switch_domain_id,
+					       &get_repr_ctx.controllers,
+					       &get_repr_ctx.nb_controllers);
+	if (rc != 0) {
+		sfc_adapter_unlock(sa);
+		SFC_ASSERT(rc > 0);
+		return -rc;
+	}
+
+	nic_cfg = efx_nic_cfg_get(sa->nic);
+
+	rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
+						  nic_cfg->enc_intf,
+						  &controller);
+	if (rc != 0) {
+		sfc_err(sa, "invalid controller: %d", nic_cfg->enc_intf);
+		controller = -1;
+	}
+
+	info->controller = controller;
+	info->pf = nic_cfg->enc_pf;
+
+	get_repr_ctx.info = info;
+	get_repr_ctx.sa = sa;
+	get_repr_ctx.switch_domain_id = switch_domain_id;
+	rc = sfc_mae_switch_ports_iterate(switch_domain_id,
+					  sfc_get_representors_cb,
+					  &get_repr_ctx);
+	if (rc != 0) {
+		sfc_adapter_unlock(sa);
+		SFC_ASSERT(rc > 0);
+		return -rc;
+	}
+
+	sfc_adapter_unlock(sa);
+	return nb_repr;
+}
+
+static int
+sfc_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	uint64_t supported = 0;
+
+	sfc_adapter_lock(sa);
+
+	if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_FLAG) != 0)
+		supported |= RTE_ETH_RX_METADATA_USER_FLAG;
+
+	if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_MARK) != 0)
+		supported |= RTE_ETH_RX_METADATA_USER_MARK;
+
+	if (sfc_flow_tunnel_is_supported(sa))
+		supported |= RTE_ETH_RX_METADATA_TUNNEL_ID;
+
+	sa->negotiated_rx_metadata = supported & *features;
+	*features = sa->negotiated_rx_metadata;
+
+	sfc_adapter_unlock(sa);
+
+	return 0;
+}
+
 static const struct eth_dev_ops sfc_eth_dev_ops = {
 	.dev_configure = sfc_dev_configure,
 	.dev_start = sfc_dev_start,
@@ -1965,6 +2381,8 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
 	.xstats_get_by_id = sfc_xstats_get_by_id,
 	.xstats_get_names_by_id = sfc_xstats_get_names_by_id,
 	.pool_ops_supported = sfc_pool_ops_supported,
+	.representor_info_get = sfc_representor_info_get,
+	.rx_metadata_negotiate = sfc_rx_metadata_negotiate,
 };
 
 struct sfc_ethdev_init_data {
@@ -2061,6 +2479,12 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
 		goto fail_dp_rx_name;
 	}
 
+	if (strcmp(dp_rx->dp.name, SFC_KVARG_DATAPATH_EF10_ESSB) == 0) {
+		/* FLAG and MARK are always available from Rx prefix. */
+		sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_FLAG;
+		sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_MARK;
+	}
+
 	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);
 
 	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
@@ -2264,7 +2688,7 @@ sfc_parse_switch_mode(struct sfc_adapter *sa, bool has_representors)
 		goto fail_kvargs;
 
 	if (switch_mode == NULL) {
-		sa->switchdev = encp->enc_mae_supported &&
+		sa->switchdev = encp->enc_mae_admin &&
 				(!encp->enc_datapath_cap_evb ||
 				 has_representors);
 	} else if (strcasecmp(switch_mode, SFC_KVARG_SWITCH_MODE_LEGACY) == 0) {
@@ -2359,7 +2783,6 @@ sfc_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
 
 	/* Copy PCI device info to the dev->data */
 	rte_eth_copy_pci_info(dev, pci_dev);
-	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
 
 	rc = sfc_kvargs_parse(sa);
@@ -2400,9 +2823,9 @@ sfc_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
 	if (rc != 0)
 		goto fail_attach;
 
-	if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_SUPPORTED) {
+	if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_ADMIN) {
 		sfc_err(sa,
-			"failed to enable switchdev mode without MAE support");
+			"failed to enable switchdev mode without admin MAE privilege");
 		rc = ENOTSUP;
 		goto fail_switchdev_no_mae;
 	}
@@ -2493,47 +2916,181 @@ sfc_parse_rte_devargs(const char *args, struct rte_eth_devargs *devargs)
 }
 
 static int
-sfc_eth_dev_create(struct rte_pci_device *pci_dev,
-		   struct sfc_ethdev_init_data *init_data,
-		   struct rte_eth_dev **devp)
+sfc_eth_dev_find_or_create(struct rte_pci_device *pci_dev,
+			   struct sfc_ethdev_init_data *init_data,
+			   struct rte_eth_dev **devp,
+			   bool *dev_created)
 {
 	struct rte_eth_dev *dev;
+	bool created = false;
 	int rc;
 
-	rc = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
-				sizeof(struct sfc_adapter_shared),
-				eth_dev_pci_specific_init, pci_dev,
-				sfc_eth_dev_init, init_data);
-	if (rc != 0) {
-		SFC_GENERIC_LOG(ERR, "Failed to create sfc ethdev '%s'",
-				pci_dev->device.name);
-		return rc;
-	}
-
 	dev = rte_eth_dev_allocated(pci_dev->device.name);
 	if (dev == NULL) {
-		SFC_GENERIC_LOG(ERR, "Failed to find allocated sfc ethdev '%s'",
+		rc = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+					sizeof(struct sfc_adapter_shared),
+					eth_dev_pci_specific_init, pci_dev,
+					sfc_eth_dev_init, init_data);
+		if (rc != 0) {
+			SFC_GENERIC_LOG(ERR, "Failed to create sfc ethdev '%s'",
+					pci_dev->device.name);
+			return rc;
+		}
+
+		created = true;
+
+		dev = rte_eth_dev_allocated(pci_dev->device.name);
+		if (dev == NULL) {
+			SFC_GENERIC_LOG(ERR,
+				"Failed to find allocated sfc ethdev '%s'",
 				pci_dev->device.name);
-		return -ENODEV;
+			return -ENODEV;
+		}
 	}
 
 	*devp = dev;
+	*dev_created = created;
+
+	return 0;
+}
+
+static int
+sfc_eth_dev_create_repr(struct sfc_adapter *sa,
+			efx_pcie_interface_t controller,
+			uint16_t port,
+			uint16_t repr_port,
+			enum rte_eth_representor_type type)
+{
+	struct sfc_repr_entity_info entity;
+	efx_mport_sel_t mport_sel;
+	int rc;
+
+	switch (type) {
+	case RTE_ETH_REPRESENTOR_NONE:
+		return 0;
+	case RTE_ETH_REPRESENTOR_VF:
+	case RTE_ETH_REPRESENTOR_PF:
+		break;
+	case RTE_ETH_REPRESENTOR_SF:
+		sfc_err(sa, "SF representors are not supported");
+		return ENOTSUP;
+	default:
+		sfc_err(sa, "unknown representor type: %d", type);
+		return ENOTSUP;
+	}
+
+	rc = efx_mae_mport_by_pcie_mh_function(controller,
+					       port,
+					       repr_port,
+					       &mport_sel);
+	if (rc != 0) {
+		sfc_err(sa,
+			"failed to get m-port selector for controller %u port %u repr_port %u: %s",
+			controller, port, repr_port, rte_strerror(-rc));
+		return rc;
+	}
+
+	memset(&entity, 0, sizeof(entity));
+	entity.type = type;
+	entity.intf = controller;
+	entity.pf = port;
+	entity.vf = repr_port;
+
+	rc = sfc_repr_create(sa->eth_dev, &entity, sa->mae.switch_domain_id,
+			     &mport_sel);
+	if (rc != 0) {
+		sfc_err(sa,
+			"failed to create representor for controller %u port %u repr_port %u: %s",
+			controller, port, repr_port, rte_strerror(-rc));
+		return rc;
+	}
 
 	return 0;
 }
 
+static int
+sfc_eth_dev_create_repr_port(struct sfc_adapter *sa,
+			     const struct rte_eth_devargs *eth_da,
+			     efx_pcie_interface_t controller,
+			     uint16_t port)
+{
+	int first_error = 0;
+	uint16_t i;
+	int rc;
+
+	if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+		return sfc_eth_dev_create_repr(sa, controller, port,
+					       EFX_PCI_VF_INVALID,
+					       eth_da->type);
+	}
+
+	for (i = 0; i < eth_da->nb_representor_ports; i++) {
+		rc = sfc_eth_dev_create_repr(sa, controller, port,
+					     eth_da->representor_ports[i],
+					     eth_da->type);
+		if (rc != 0 && first_error == 0)
+			first_error = rc;
+	}
+
+	return first_error;
+}
+
+static int
+sfc_eth_dev_create_repr_controller(struct sfc_adapter *sa,
+				   const struct rte_eth_devargs *eth_da,
+				   efx_pcie_interface_t controller)
+{
+	const efx_nic_cfg_t *encp;
+	int first_error = 0;
+	uint16_t default_port;
+	uint16_t i;
+	int rc;
+
+	if (eth_da->nb_ports == 0) {
+		encp = efx_nic_cfg_get(sa->nic);
+		default_port = encp->enc_intf == controller ? encp->enc_pf : 0;
+		return sfc_eth_dev_create_repr_port(sa, eth_da, controller,
+						    default_port);
+	}
+
+	for (i = 0; i < eth_da->nb_ports; i++) {
+		rc = sfc_eth_dev_create_repr_port(sa, eth_da, controller,
+						  eth_da->ports[i]);
+		if (rc != 0 && first_error == 0)
+			first_error = rc;
+	}
+
+	return first_error;
+}
+
 static int
 sfc_eth_dev_create_representors(struct rte_eth_dev *dev,
 				const struct rte_eth_devargs *eth_da)
 {
+	efx_pcie_interface_t intf;
+	const efx_nic_cfg_t *encp;
 	struct sfc_adapter *sa;
-	unsigned int i;
+	uint16_t switch_domain_id;
+	uint16_t i;
 	int rc;
 
-	if (eth_da->nb_representor_ports == 0)
-		return 0;
-
 	sa = sfc_adapter_by_eth_dev(dev);
+	switch_domain_id = sa->mae.switch_domain_id;
+
+	switch (eth_da->type) {
+	case RTE_ETH_REPRESENTOR_NONE:
+		return 0;
+	case RTE_ETH_REPRESENTOR_PF:
+	case RTE_ETH_REPRESENTOR_VF:
+		break;
+	case RTE_ETH_REPRESENTOR_SF:
+		sfc_err(sa, "SF representors are not supported");
+		return -ENOTSUP;
+	default:
+		sfc_err(sa, "unknown representor type: %d",
+			eth_da->type);
+		return -ENOTSUP;
+	}
 
 	if (!sa->switchdev) {
 		sfc_err(sa, "cannot create representors in non-switchdev mode");
@@ -2546,27 +3103,32 @@ sfc_eth_dev_create_representors(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	for (i = 0; i < eth_da->nb_representor_ports; ++i) {
-		const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
-		efx_mport_sel_t mport_sel;
-
-		rc = efx_mae_mport_by_pcie_function(encp->enc_pf,
-				eth_da->representor_ports[i], &mport_sel);
-		if (rc != 0) {
-			sfc_err(sa,
-				"failed to get representor %u m-port: %s - ignore",
-				eth_da->representor_ports[i],
-				rte_strerror(-rc));
-			continue;
-		}
+	/*
+	 * This is needed to construct the DPDK controller -> EFX interface
+	 * mapping.
+	 */
+	sfc_adapter_lock(sa);
+	rc = sfc_process_mport_journal(sa);
+	sfc_adapter_unlock(sa);
+	if (rc != 0) {
+		SFC_ASSERT(rc > 0);
+		return -rc;
+	}
 
-		rc = sfc_repr_create(dev, eth_da->representor_ports[i],
-				     sa->mae.switch_domain_id, &mport_sel);
-		if (rc != 0) {
-			sfc_err(sa, "cannot create representor %u: %s - ignore",
-				eth_da->representor_ports[i],
-				rte_strerror(-rc));
+	if (eth_da->nb_mh_controllers > 0) {
+		for (i = 0; i < eth_da->nb_mh_controllers; i++) {
+			rc = sfc_mae_switch_domain_get_intf(switch_domain_id,
+						eth_da->mh_controllers[i],
+						&intf);
+			if (rc != 0) {
+				sfc_err(sa, "failed to get representor");
+				continue;
+			}
+			sfc_eth_dev_create_repr_controller(sa, eth_da, intf);
 		}
+	} else {
+		encp = efx_nic_cfg_get(sa->nic);
+		sfc_eth_dev_create_repr_controller(sa, eth_da, encp->enc_intf);
 	}
 
 	return 0;
@@ -2578,6 +3140,7 @@ static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct sfc_ethdev_init_data init_data;
 	struct rte_eth_devargs eth_da;
 	struct rte_eth_dev *dev;
+	bool dev_created;
 	int rc;
 
 	if (pci_dev->device.devargs != NULL) {
@@ -2589,9 +3152,13 @@ static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		memset(&eth_da, 0, sizeof(eth_da));
 	}
 
-	init_data.nb_representors = eth_da.nb_representor_ports;
+	/* If no VF representors specified, check for PF ones */
+	if (eth_da.nb_representor_ports > 0)
+		init_data.nb_representors = eth_da.nb_representor_ports;
+	else
+		init_data.nb_representors = eth_da.nb_ports;
 
-	if (eth_da.nb_representor_ports > 0 &&
+	if (init_data.nb_representors > 0 &&
 	    rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		SFC_GENERIC_LOG(ERR,
 			"Create representors from secondary process not supported, dev '%s'",
@@ -2599,13 +3166,21 @@ static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return -ENOTSUP;
 	}
 
-	rc = sfc_eth_dev_create(pci_dev, &init_data, &dev);
+	/*
+	 * Driver supports RTE_PCI_DRV_PROBE_AGAIN. Hence create device only
+	 * if it does not already exist. Re-probing an existing device is
+	 * expected to allow additional representors to be configured.
+	 */
+	rc = sfc_eth_dev_find_or_create(pci_dev, &init_data, &dev,
+					&dev_created);
 	if (rc != 0)
 		return rc;
 
 	rc = sfc_eth_dev_create_representors(dev, &eth_da);
 	if (rc != 0) {
-		(void)rte_eth_dev_destroy(dev, sfc_eth_dev_uninit);
+		if (dev_created)
+			(void)rte_eth_dev_destroy(dev, sfc_eth_dev_uninit);
+
 		return rc;
 	}
 
@@ -2621,7 +3196,8 @@ static struct rte_pci_driver sfc_efx_pmd = {
 	.id_table = pci_id_sfc_efx_map,
 	.drv_flags =
 		RTE_PCI_DRV_INTR_LSC |
-		RTE_PCI_DRV_NEED_MAPPING,
+		RTE_PCI_DRV_NEED_MAPPING |
+		RTE_PCI_DRV_PROBE_AGAIN,
 	.probe = sfc_eth_dev_pci_probe,
 	.remove = sfc_eth_dev_pci_remove,
 };
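
The patch only adds the driver-side callbacks; the fragment below is an illustrative application-side sketch (not part of the change) of how the two new ops, .rx_metadata_negotiate and .representor_info_get, are reached through the generic ethdev API. The port number, allocation strategy and error-handling policy are assumptions made for the example; rte_eth_rx_metadata_negotiate() and rte_eth_representor_info_get() are the standard library entry points that dispatch to these ops.

/*
 * Illustrative usage sketch. Assumes "port_id" refers to an already probed
 * sfc admin port; error handling is reduced to the bare minimum.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_ethdev.h>

static int
example_query_admin_port(uint16_t port_id)
{
	uint64_t metadata = RTE_ETH_RX_METADATA_USER_FLAG |
			    RTE_ETH_RX_METADATA_USER_MARK |
			    RTE_ETH_RX_METADATA_TUNNEL_ID;
	struct rte_eth_representor_info *info;
	int nb_ranges;
	uint32_t i;
	int ret;

	/* Negotiate Rx metadata delivery before the port is configured. */
	ret = rte_eth_rx_metadata_negotiate(port_id, &metadata);
	if (ret != 0 && ret != -ENOTSUP)
		return ret;

	/* With a NULL info, the driver above reports how many ranges exist. */
	nb_ranges = rte_eth_representor_info_get(port_id, NULL);
	if (nb_ranges <= 0)
		return nb_ranges;

	info = calloc(1, sizeof(*info) + nb_ranges * sizeof(info->ranges[0]));
	if (info == NULL)
		return -ENOMEM;

	info->nb_ranges_alloc = nb_ranges;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0) {
		free(info);
		return ret;
	}

	for (i = 0; i < info->nb_ranges; i++)
		printf("representor range %s: switch port IDs [%u, %u]\n",
		       info->ranges[i].name, info->ranges[i].id_base,
		       info->ranges[i].id_end);

	free(info);
	return 0;
}

Since the driver now advertises RTE_PCI_DRV_PROBE_AGAIN, additional representors can be requested on a re-probe with devargs along the lines of representor=c0pf0vf[0-2]; the exact accepted syntax is whatever rte_eth_devargs_parse() supports for controller/PF/VF lists.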