diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e8c67c9..833d833 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -26,6 +26,7 @@
 #include "sfc_rx.h"
 #include "sfc_tx.h"
 #include "sfc_flow.h"
+#include "sfc_flow_tunnel.h"
 #include "sfc_dp.h"
 #include "sfc_dp_rx.h"
 #include "sfc_repr.h"
@@ -104,19 +105,19 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_vfs = sa->sriov.num_vfs;
 
        /* Autonegotiation may be disabled */
-       dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
        if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
-               dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
        if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
-               dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
        if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
-               dev_info->speed_capa |= ETH_LINK_SPEED_25G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
        if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
-               dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
        if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
-               dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
        if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
-               dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
 
        dev_info->max_rx_queues = sa->rxq_max;
        dev_info->max_tx_queues = sa->txq_max;
@@ -144,8 +145,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
                                    dev_info->tx_queue_offload_capa;
 
-       if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-               txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+       if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+               txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
        dev_info->default_txconf.offloads |= txq_offloads_def;
 
@@ -186,7 +187,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                             RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
-       if (mae->status == SFC_MAE_STATUS_SUPPORTED) {
+       if (mae->status == SFC_MAE_STATUS_SUPPORTED ||
+           mae->status == SFC_MAE_STATUS_ADMIN) {
                dev_info->switch_info.name = dev->device->driver->name;
                dev_info->switch_info.domain_id = mae->switch_domain_id;
                dev_info->switch_info.port_id = mae->switch_port_id;
@@ -987,16 +989,16 @@ sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
        switch (link_fc) {
        case 0:
-               fc_conf->mode = RTE_FC_NONE;
+               fc_conf->mode = RTE_ETH_FC_NONE;
                break;
        case EFX_FCNTL_RESPOND:
-               fc_conf->mode = RTE_FC_RX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
                break;
        case EFX_FCNTL_GENERATE:
-               fc_conf->mode = RTE_FC_TX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
                break;
        case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
-               fc_conf->mode = RTE_FC_FULL;
+               fc_conf->mode = RTE_ETH_FC_FULL;
                break;
        default:
                sfc_err(sa, "%s: unexpected flow control value %#x",
@@ -1027,16 +1029,16 @@ sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        }
 
        switch (fc_conf->mode) {
-       case RTE_FC_NONE:
+       case RTE_ETH_FC_NONE:
                fcntl = 0;
                break;
-       case RTE_FC_RX_PAUSE:
+       case RTE_ETH_FC_RX_PAUSE:
                fcntl = EFX_FCNTL_RESPOND;
                break;
-       case RTE_FC_TX_PAUSE:
+       case RTE_ETH_FC_TX_PAUSE:
                fcntl = EFX_FCNTL_GENERATE;
                break;
-       case RTE_FC_FULL:
+       case RTE_ETH_FC_FULL:
                fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
                break;
        default:
@@ -1139,17 +1141,6 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                }
        }
 
-       /*
-        * The driver does not use it, but other PMDs update jumbo frame
-        * flag and max_rx_pkt_len when MTU is set.
-        */
-       if (mtu > RTE_ETHER_MTU) {
-               struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-               rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-       }
-
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
-
        sfc_adapter_unlock(sa);
 
        sfc_log_init(sa, "done");
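
For context: DPDK 21.11 dropped DEV_RX_OFFLOAD_JUMBO_FRAME and rxmode.max_rx_pkt_len, so the MTU alone now defines the maximum Rx frame size and the driver no longer mirrors it into dev_conf. A minimal sketch of how an application requests jumbo frames against the new API; the function name, port_id and the 9000-byte value are illustrative placeholders, not part of the patch:

#include <rte_ethdev.h>

/* Request a jumbo MTU; all parameters are placeholders. */
static int
example_set_jumbo_mtu(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mtu = 9000 },	/* replaces max_rx_pkt_len pre-21.11 */
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	if (ret != 0)
		return ret;
	/* Alternatively, adjust later via rte_eth_dev_set_mtu(). */
	return rte_eth_dev_set_mtu(port_id, 9000);
}
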
@@ -1322,7 +1313,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
        qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
        qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
        if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
-               qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+               qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
                qinfo->scattered_rx = 1;
        }
        qinfo->nb_desc = rxq_info->entries;
@@ -1356,19 +1347,19 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
  * use any process-local pointers from the adapter data.
  */
 static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
+sfc_rx_queue_count(void *rx_queue)
 {
-       const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
-       struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
-       sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+       struct sfc_dp_rxq *dp_rxq = rx_queue;
+       const struct sfc_dp_rx *dp_rx;
        struct sfc_rxq_info *rxq_info;
 
-       rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+       dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
+       rxq_info = sfc_rxq_info_by_dp_rxq(dp_rxq);
 
        if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
                return 0;
 
-       return sap->dp_rx->qdesc_npending(rxq_info->dp);
+       return dp_rx->qdesc_npending(dp_rxq);
 }
 
 /*
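
The prototype change follows the 21.11 ethdev fast-path rework: the queue-count callback now receives the Rx queue pointer itself, so it no longer needs process-local adapter pointers (see the comment above). The application-facing call is unchanged; a minimal sketch, not part of the patch, with example_rx_pending, port_id and queue_id as placeholders:

#include <stdio.h>
#include <rte_ethdev.h>

/* Report how many Rx descriptors are pending on one queue. */
static int
example_rx_pending(uint16_t port_id, uint16_t queue_id)
{
	int pending = rte_eth_rx_queue_count(port_id, queue_id);

	if (pending < 0)
		return pending;	/* e.g. -ENOTSUP for this datapath */
	printf("%d Rx descriptors pending on queue %u\n", pending, queue_id);
	return 0;
}
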
@@ -1532,9 +1523,9 @@ static efx_tunnel_protocol_t
 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
 {
        switch (rte_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                return EFX_TUNNEL_PROTOCOL_VXLAN;
-       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
                return EFX_TUNNEL_PROTOCOL_GENEVE;
        default:
                return EFX_TUNNEL_NPROTOS;
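
A minimal sketch of the caller path that ends up in this helper: registering a VXLAN UDP port through the generic ethdev API, which the PMD converts to the EFX protocol above. The function name and port_id are placeholders; 4789 is the IANA VXLAN port. Not part of the patch:

#include <rte_ethdev.h>

/* Register the standard VXLAN UDP port on a given port. */
static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
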
@@ -1661,7 +1652,7 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 
        /*
         * Mapping of hash configuration between RTE and EFX is not one-to-one,
-        * hence, conversion is done here to derive a correct set of ETH_RSS
+        * hence, conversion is done here to derive a correct set of RTE_ETH_RSS
         * flags which corresponds to the active EFX configuration stored
         * locally in 'sfc_adapter' and kept up-to-date
         */
@@ -1787,8 +1778,8 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
                return -EINVAL;
 
        for (entry = 0; entry < reta_size; entry++) {
-               int grp = entry / RTE_RETA_GROUP_SIZE;
-               int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+               int grp = entry / RTE_ETH_RETA_GROUP_SIZE;
+               int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
 
                if ((reta_conf[grp].mask >> grp_idx) & 1)
                        reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
@@ -1837,10 +1828,10 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
        rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
 
        for (entry = 0; entry < reta_size; entry++) {
-               int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+               int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
                struct rte_eth_rss_reta_entry64 *grp;
 
-               grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+               grp = &reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE];
 
                if (grp->mask & (1ull << grp_idx)) {
                        if (grp->reta[grp_idx] >= rss->channels) {
@@ -1978,7 +1969,11 @@ static efx_rc_t
 sfc_process_mport_journal_entry(struct sfc_mport_journal_ctx *ctx,
                                efx_mport_desc_t *mport)
 {
+       struct sfc_mae_switch_port_request req;
+       efx_mport_sel_t entity_selector;
        efx_mport_sel_t ethdev_mport;
+       uint16_t switch_port_id;
+       efx_rc_t efx_rc;
        int rc;
 
        sfc_dbg(ctx->sa,
@@ -1994,6 +1989,63 @@ sfc_process_mport_journal_entry(struct sfc_mport_journal_ctx *ctx,
                        return rc;
        }
 
+       /* Build Mport selector */
+       efx_rc = efx_mae_mport_by_pcie_mh_function(mport->emd_vnic.ev_intf,
+                                               mport->emd_vnic.ev_pf,
+                                               mport->emd_vnic.ev_vf,
+                                               &entity_selector);
+       if (efx_rc != 0) {
+               sfc_err(ctx->sa, "failed to build entity mport selector for c%upf%uvf%u",
+                       mport->emd_vnic.ev_intf,
+                       mport->emd_vnic.ev_pf,
+                       mport->emd_vnic.ev_vf);
+               return efx_rc;
+       }
+
+       rc = sfc_mae_switch_port_id_by_entity(ctx->switch_domain_id,
+                                             &entity_selector,
+                                             SFC_MAE_SWITCH_PORT_REPRESENTOR,
+                                             &switch_port_id);
+       switch (rc) {
+       case 0:
+               /* Already registered */
+               break;
+       case ENOENT:
+               /*
+                * No representor has been created for this entity.
+                * Create a dummy switch registry entry with an invalid ethdev
+                * mport selector. When a corresponding representor is created,
+                * this entry will be updated.
+                */
+               req.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
+               req.entity_mportp = &entity_selector;
+               req.ethdev_mportp = &ethdev_mport;
+               req.ethdev_port_id = RTE_MAX_ETHPORTS;
+               req.port_data.repr.intf = mport->emd_vnic.ev_intf;
+               req.port_data.repr.pf = mport->emd_vnic.ev_pf;
+               req.port_data.repr.vf = mport->emd_vnic.ev_vf;
+
+               rc = sfc_mae_assign_switch_port(ctx->switch_domain_id,
+                                               &req, &switch_port_id);
+               if (rc != 0) {
+                       sfc_err(ctx->sa,
+                               "failed to assign MAE switch port for c%upf%uvf%u: %s",
+                               mport->emd_vnic.ev_intf,
+                               mport->emd_vnic.ev_pf,
+                               mport->emd_vnic.ev_vf,
+                               rte_strerror(rc));
+                       return rc;
+               }
+               break;
+       default:
+               sfc_err(ctx->sa, "failed to find MAE switch port for c%upf%uvf%u: %s",
+                       mport->emd_vnic.ev_intf,
+                       mport->emd_vnic.ev_pf,
+                       mport->emd_vnic.ev_vf,
+                       rte_strerror(rc));
+               return rc;
+       }
+
        return 0;
 }
 
@@ -2090,6 +2142,198 @@ sfc_process_mport_journal(struct sfc_adapter *sa)
        return 0;
 }
 
+static void
+sfc_count_representors_cb(enum sfc_mae_switch_port_type type,
+                         const efx_mport_sel_t *ethdev_mportp __rte_unused,
+                         uint16_t ethdev_port_id __rte_unused,
+                         const efx_mport_sel_t *entity_mportp __rte_unused,
+                         uint16_t switch_port_id __rte_unused,
+                         union sfc_mae_switch_port_data *port_datap
+                               __rte_unused,
+                         void *user_datap)
+{
+       int *counter = user_datap;
+
+       SFC_ASSERT(counter != NULL);
+
+       if (type == SFC_MAE_SWITCH_PORT_REPRESENTOR)
+               (*counter)++;
+}
+
+struct sfc_get_representors_ctx {
+       struct rte_eth_representor_info *info;
+       struct sfc_adapter              *sa;
+       uint16_t                        switch_domain_id;
+       const efx_pcie_interface_t      *controllers;
+       size_t                          nb_controllers;
+};
+
+static void
+sfc_get_representors_cb(enum sfc_mae_switch_port_type type,
+                       const efx_mport_sel_t *ethdev_mportp __rte_unused,
+                       uint16_t ethdev_port_id __rte_unused,
+                       const efx_mport_sel_t *entity_mportp __rte_unused,
+                       uint16_t switch_port_id,
+                       union sfc_mae_switch_port_data *port_datap,
+                       void *user_datap)
+{
+       struct sfc_get_representors_ctx *ctx = user_datap;
+       struct rte_eth_representor_range *range;
+       int ret;
+       int rc;
+
+       SFC_ASSERT(ctx != NULL);
+       SFC_ASSERT(ctx->info != NULL);
+       SFC_ASSERT(ctx->sa != NULL);
+
+       if (type != SFC_MAE_SWITCH_PORT_REPRESENTOR) {
+               sfc_dbg(ctx->sa, "not a representor, skipping");
+               return;
+       }
+       if (ctx->info->nb_ranges >= ctx->info->nb_ranges_alloc) {
+               sfc_dbg(ctx->sa, "info structure is full already");
+               return;
+       }
+
+       range = &ctx->info->ranges[ctx->info->nb_ranges];
+       rc = sfc_mae_switch_controller_from_mapping(ctx->controllers,
+                                                   ctx->nb_controllers,
+                                                   port_datap->repr.intf,
+                                                   &range->controller);
+       if (rc != 0) {
+               sfc_err(ctx->sa, "invalid representor controller: %d",
+                       port_datap->repr.intf);
+               range->controller = -1;
+       }
+       range->pf = port_datap->repr.pf;
+       range->id_base = switch_port_id;
+       range->id_end = switch_port_id;
+
+       if (port_datap->repr.vf != EFX_PCI_VF_INVALID) {
+               range->type = RTE_ETH_REPRESENTOR_VF;
+               range->vf = port_datap->repr.vf;
+               ret = snprintf(range->name, RTE_DEV_NAME_MAX_LEN,
+                              "c%dpf%dvf%d", range->controller, range->pf,
+                              range->vf);
+       } else {
+               range->type = RTE_ETH_REPRESENTOR_PF;
+               ret = snprintf(range->name, RTE_DEV_NAME_MAX_LEN,
+                        "c%dpf%d", range->controller, range->pf);
+       }
+       if (ret >= RTE_DEV_NAME_MAX_LEN) {
+               sfc_err(ctx->sa, "representor name has been truncated: %s",
+                       range->name);
+       }
+
+       ctx->info->nb_ranges++;
+}
+
+static int
+sfc_representor_info_get(struct rte_eth_dev *dev,
+                        struct rte_eth_representor_info *info)
+{
+       struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+       struct sfc_get_representors_ctx get_repr_ctx;
+       const efx_nic_cfg_t *nic_cfg;
+       uint16_t switch_domain_id;
+       uint32_t nb_repr;
+       int controller;
+       int rc;
+
+       sfc_adapter_lock(sa);
+
+       if (sa->mae.status != SFC_MAE_STATUS_ADMIN) {
+               sfc_adapter_unlock(sa);
+               return -ENOTSUP;
+       }
+
+       rc = sfc_process_mport_journal(sa);
+       if (rc != 0) {
+               sfc_adapter_unlock(sa);
+               SFC_ASSERT(rc > 0);
+               return -rc;
+       }
+
+       switch_domain_id = sa->mae.switch_domain_id;
+
+       nb_repr = 0;
+       rc = sfc_mae_switch_ports_iterate(switch_domain_id,
+                                         sfc_count_representors_cb,
+                                         &nb_repr);
+       if (rc != 0) {
+               sfc_adapter_unlock(sa);
+               SFC_ASSERT(rc > 0);
+               return -rc;
+       }
+
+       if (info == NULL) {
+               sfc_adapter_unlock(sa);
+               return nb_repr;
+       }
+
+       rc = sfc_mae_switch_domain_controllers(switch_domain_id,
+                                              &get_repr_ctx.controllers,
+                                              &get_repr_ctx.nb_controllers);
+       if (rc != 0) {
+               sfc_adapter_unlock(sa);
+               SFC_ASSERT(rc > 0);
+               return -rc;
+       }
+
+       nic_cfg = efx_nic_cfg_get(sa->nic);
+
+       rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
+                                                 nic_cfg->enc_intf,
+                                                 &controller);
+       if (rc != 0) {
+               sfc_err(sa, "invalid controller: %d", nic_cfg->enc_intf);
+               controller = -1;
+       }
+
+       info->controller = controller;
+       info->pf = nic_cfg->enc_pf;
+
+       get_repr_ctx.info = info;
+       get_repr_ctx.sa = sa;
+       get_repr_ctx.switch_domain_id = switch_domain_id;
+       rc = sfc_mae_switch_ports_iterate(switch_domain_id,
+                                         sfc_get_representors_cb,
+                                         &get_repr_ctx);
+       if (rc != 0) {
+               sfc_adapter_unlock(sa);
+               SFC_ASSERT(rc > 0);
+               return -rc;
+       }
+
+       sfc_adapter_unlock(sa);
+       return nb_repr;
+}
+
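
sfc_representor_info_get() follows the two-call convention of rte_eth_representor_info_get(): with info == NULL it only reports how many ranges are available, otherwise it fills one range per representor switch port. A minimal usage sketch under that assumption; the function name and port_id are placeholders, not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Query the number of representor ranges, then fetch and print them. */
static int
example_dump_representor_info(uint16_t port_id)
{
	struct rte_eth_representor_info *info;
	int n, i;

	n = rte_eth_representor_info_get(port_id, NULL);	/* range count */
	if (n < 0)
		return n;

	info = calloc(1, sizeof(*info) + n * sizeof(info->ranges[0]));
	if (info == NULL)
		return -ENOMEM;

	info->nb_ranges_alloc = n;
	n = rte_eth_representor_info_get(port_id, info);	/* fill ranges */
	for (i = 0; n >= 0 && i < (int)info->nb_ranges; i++)
		printf("range %s: switch ports %u..%u\n",
		       info->ranges[i].name,
		       info->ranges[i].id_base, info->ranges[i].id_end);

	free(info);
	return n < 0 ? n : 0;
}
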
+static int
+sfc_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features)
+{
+       struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+       uint64_t supported = 0;
+
+       sfc_adapter_lock(sa);
+
+       if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_FLAG) != 0)
+               supported |= RTE_ETH_RX_METADATA_USER_FLAG;
+
+       if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_MARK) != 0)
+               supported |= RTE_ETH_RX_METADATA_USER_MARK;
+
+       if (sfc_flow_tunnel_is_supported(sa))
+               supported |= RTE_ETH_RX_METADATA_TUNNEL_ID;
+
+       sa->negotiated_rx_metadata = supported & *features;
+       *features = sa->negotiated_rx_metadata;
+
+       sfc_adapter_unlock(sa);
+
+       return 0;
+}
+
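
This wires up the rx_metadata_negotiate ethdev op introduced in 21.11: the application declares which Rx metadata kinds it intends to use before the first rte_eth_dev_configure(), and the PMD trims the set to what the selected Rx datapath can deliver. A minimal sketch, not part of the patch; the function name and port_id are placeholders:

#include <rte_ethdev.h>

/* Ask for flag, mark and tunnel ID delivery; on return, *features holds
 * the subset the PMD actually granted.
 */
static int
example_negotiate_rx_metadata(uint16_t port_id, uint64_t *features)
{
	*features = RTE_ETH_RX_METADATA_USER_FLAG |
		    RTE_ETH_RX_METADATA_USER_MARK |
		    RTE_ETH_RX_METADATA_TUNNEL_ID;

	return rte_eth_rx_metadata_negotiate(port_id, features);
}
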
 static const struct eth_dev_ops sfc_eth_dev_ops = {
        .dev_configure                  = sfc_dev_configure,
        .dev_start                      = sfc_dev_start,
@@ -2137,6 +2381,8 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
        .xstats_get_by_id               = sfc_xstats_get_by_id,
        .xstats_get_names_by_id         = sfc_xstats_get_names_by_id,
        .pool_ops_supported             = sfc_pool_ops_supported,
+       .representor_info_get           = sfc_representor_info_get,
+       .rx_metadata_negotiate          = sfc_rx_metadata_negotiate,
 };
 
 struct sfc_ethdev_init_data {
@@ -2233,6 +2479,12 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
                goto fail_dp_rx_name;
        }
 
+       if (strcmp(dp_rx->dp.name, SFC_KVARG_DATAPATH_EF10_ESSB) == 0) {
+               /* FLAG and MARK are always available from Rx prefix. */
+               sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_FLAG;
+               sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_MARK;
+       }
+
        sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);
 
        rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
@@ -2436,7 +2688,7 @@ sfc_parse_switch_mode(struct sfc_adapter *sa, bool has_representors)
                goto fail_kvargs;
 
        if (switch_mode == NULL) {
-               sa->switchdev = encp->enc_mae_supported &&
+               sa->switchdev = encp->enc_mae_admin &&
                                (!encp->enc_datapath_cap_evb ||
                                 has_representors);
        } else if (strcasecmp(switch_mode, SFC_KVARG_SWITCH_MODE_LEGACY) == 0) {
@@ -2531,7 +2783,6 @@ sfc_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
 
        /* Copy PCI device info to the dev->data */
        rte_eth_copy_pci_info(dev, pci_dev);
-       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
 
        rc = sfc_kvargs_parse(sa);
@@ -2572,9 +2823,9 @@ sfc_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
        if (rc != 0)
                goto fail_attach;
 
-       if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_SUPPORTED) {
+       if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_ADMIN) {
                sfc_err(sa,
-                       "failed to enable switchdev mode without MAE support");
+                       "failed to enable switchdev mode without admin MAE privilege");
                rc = ENOTSUP;
                goto fail_switchdev_no_mae;
        }
@@ -2703,18 +2954,143 @@ sfc_eth_dev_find_or_create(struct rte_pci_device *pci_dev,
        return 0;
 }
 
+static int
+sfc_eth_dev_create_repr(struct sfc_adapter *sa,
+                       efx_pcie_interface_t controller,
+                       uint16_t port,
+                       uint16_t repr_port,
+                       enum rte_eth_representor_type type)
+{
+       struct sfc_repr_entity_info entity;
+       efx_mport_sel_t mport_sel;
+       int rc;
+
+       switch (type) {
+       case RTE_ETH_REPRESENTOR_NONE:
+               return 0;
+       case RTE_ETH_REPRESENTOR_VF:
+       case RTE_ETH_REPRESENTOR_PF:
+               break;
+       case RTE_ETH_REPRESENTOR_SF:
+               sfc_err(sa, "SF representors are not supported");
+               return ENOTSUP;
+       default:
+               sfc_err(sa, "unknown representor type: %d", type);
+               return ENOTSUP;
+       }
+
+       rc = efx_mae_mport_by_pcie_mh_function(controller,
+                                              port,
+                                              repr_port,
+                                              &mport_sel);
+       if (rc != 0) {
+               sfc_err(sa,
+                       "failed to get m-port selector for controller %u port %u repr_port %u: %s",
+                       controller, port, repr_port, rte_strerror(-rc));
+               return rc;
+       }
+
+       memset(&entity, 0, sizeof(entity));
+       entity.type = type;
+       entity.intf = controller;
+       entity.pf = port;
+       entity.vf = repr_port;
+
+       rc = sfc_repr_create(sa->eth_dev, &entity, sa->mae.switch_domain_id,
+                            &mport_sel);
+       if (rc != 0) {
+               sfc_err(sa,
+                       "failed to create representor for controller %u port %u repr_port %u: %s",
+                       controller, port, repr_port, rte_strerror(-rc));
+               return rc;
+       }
+
+       return 0;
+}
+
+static int
+sfc_eth_dev_create_repr_port(struct sfc_adapter *sa,
+                            const struct rte_eth_devargs *eth_da,
+                            efx_pcie_interface_t controller,
+                            uint16_t port)
+{
+       int first_error = 0;
+       uint16_t i;
+       int rc;
+
+       if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+               return sfc_eth_dev_create_repr(sa, controller, port,
+                                              EFX_PCI_VF_INVALID,
+                                              eth_da->type);
+       }
+
+       for (i = 0; i < eth_da->nb_representor_ports; i++) {
+               rc = sfc_eth_dev_create_repr(sa, controller, port,
+                                            eth_da->representor_ports[i],
+                                            eth_da->type);
+               if (rc != 0 && first_error == 0)
+                       first_error = rc;
+       }
+
+       return first_error;
+}
+
+static int
+sfc_eth_dev_create_repr_controller(struct sfc_adapter *sa,
+                                  const struct rte_eth_devargs *eth_da,
+                                  efx_pcie_interface_t controller)
+{
+       const efx_nic_cfg_t *encp;
+       int first_error = 0;
+       uint16_t default_port;
+       uint16_t i;
+       int rc;
+
+       if (eth_da->nb_ports == 0) {
+               encp = efx_nic_cfg_get(sa->nic);
+               default_port = encp->enc_intf == controller ? encp->enc_pf : 0;
+               return sfc_eth_dev_create_repr_port(sa, eth_da, controller,
+                                                   default_port);
+       }
+
+       for (i = 0; i < eth_da->nb_ports; i++) {
+               rc = sfc_eth_dev_create_repr_port(sa, eth_da, controller,
+                                                 eth_da->ports[i]);
+               if (rc != 0 && first_error == 0)
+                       first_error = rc;
+       }
+
+       return first_error;
+}
+
 static int
 sfc_eth_dev_create_representors(struct rte_eth_dev *dev,
                                const struct rte_eth_devargs *eth_da)
 {
+       efx_pcie_interface_t intf;
+       const efx_nic_cfg_t *encp;
        struct sfc_adapter *sa;
-       unsigned int i;
+       uint16_t switch_domain_id;
+       uint16_t i;
        int rc;
 
-       if (eth_da->nb_representor_ports == 0)
-               return 0;
-
        sa = sfc_adapter_by_eth_dev(dev);
+       switch_domain_id = sa->mae.switch_domain_id;
+
+       switch (eth_da->type) {
+       case RTE_ETH_REPRESENTOR_NONE:
+               return 0;
+       case RTE_ETH_REPRESENTOR_PF:
+       case RTE_ETH_REPRESENTOR_VF:
+               break;
+       case RTE_ETH_REPRESENTOR_SF:
+               sfc_err(sa, "SF representors are not supported");
+               return -ENOTSUP;
+       default:
+               sfc_err(sa, "unknown representor type: %d",
+                       eth_da->type);
+               return -ENOTSUP;
+       }
 
        if (!sa->switchdev) {
                sfc_err(sa, "cannot create representors in non-switchdev mode");
@@ -2739,34 +3115,20 @@ sfc_eth_dev_create_representors(struct rte_eth_dev *dev,
                return -rc;
        }
 
-       for (i = 0; i < eth_da->nb_representor_ports; ++i) {
-               const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
-               struct sfc_repr_entity_info entity;
-               efx_mport_sel_t mport_sel;
-
-               rc = efx_mae_mport_by_pcie_function(encp->enc_pf,
-                               eth_da->representor_ports[i], &mport_sel);
-               if (rc != 0) {
-                       sfc_err(sa,
-                               "failed to get representor %u m-port: %s - ignore",
-                               eth_da->representor_ports[i],
-                               rte_strerror(-rc));
-                       continue;
-               }
-
-               memset(&entity, 0, sizeof(entity));
-               entity.type = eth_da->type;
-               entity.intf = encp->enc_intf;
-               entity.pf = encp->enc_pf;
-               entity.vf = eth_da->representor_ports[i];
-
-               rc = sfc_repr_create(dev, &entity, sa->mae.switch_domain_id,
-                                    &mport_sel);
-               if (rc != 0) {
-                       sfc_err(sa, "cannot create representor %u: %s - ignore",
-                               eth_da->representor_ports[i],
-                               rte_strerror(-rc));
+       if (eth_da->nb_mh_controllers > 0) {
+               for (i = 0; i < eth_da->nb_mh_controllers; i++) {
+                       rc = sfc_mae_switch_domain_get_intf(switch_domain_id,
+                                               eth_da->mh_controllers[i],
+                                               &intf);
+                       if (rc != 0) {
+                               sfc_err(sa, "failed to get representor");
+                               continue;
+                       }
+                       sfc_eth_dev_create_repr_controller(sa, eth_da, intf);
                }
+       } else {
+               encp = efx_nic_cfg_get(sa->nic);
+               sfc_eth_dev_create_repr_controller(sa, eth_da, encp->enc_intf);
        }
 
        return 0;
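
With the helpers above, representors can be requested for PFs, VFs and (on multi-host adapters) other controllers through the standard representor devargs; switchdev mode is selected automatically when representors are requested on an adapter with admin MAE privilege (see sfc_parse_switch_mode() earlier in the patch). A hedged sketch of passing such devargs at EAL init, not part of the patch; the PCI address and ranges are placeholders, and the commented-out line assumes the generic c#pf#vf# devargs syntax:

#include <rte_common.h>
#include <rte_eal.h>

/* Allow one PF with a set of VF representors via devargs. */
static int
example_eal_init_with_representors(void)
{
	char *argv[] = {
		"app",
		"-a", "0000:01:00.0,representor=vf[0-3]",
		/* "-a", "0000:01:00.0,representor=c1pf0vf[0-1]", */
	};

	return rte_eal_init(RTE_DIM(argv), argv);
}
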
@@ -2790,9 +3152,13 @@ static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                memset(&eth_da, 0, sizeof(eth_da));
        }
 
-       init_data.nb_representors = eth_da.nb_representor_ports;
+       /* If no VF representors specified, check for PF ones */
+       if (eth_da.nb_representor_ports > 0)
+               init_data.nb_representors = eth_da.nb_representor_ports;
+       else
+               init_data.nb_representors = eth_da.nb_ports;
 
-       if (eth_da.nb_representor_ports > 0 &&
+       if (init_data.nb_representors > 0 &&
            rte_eal_process_type() != RTE_PROC_PRIMARY) {
                SFC_GENERIC_LOG(ERR,
                        "Create representors from secondary process not supported, dev '%s'",