ethdev: support representor id as iterator filter

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 83b82f1..a9cf988 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -46,6 +46,7 @@
 #include "mlx5_defs.h"
 #include "mlx5_glue.h"
 #include "mlx5_mr.h"
+#include "mlx5_flow.h"
 
 /* Device parameter to enable RX completion queue compression. */
 #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
@@ -89,6 +90,9 @@
 /* Allow L3 VXLAN flow creation. */
 #define MLX5_L3_VXLAN_EN "l3_vxlan_en"
 
+/* Activate DV flow steering. */
+#define MLX5_DV_FLOW_EN "dv_flow_en"
+
 /* Activate Netlink support in VF mode. */
 #define MLX5_VF_NL_EN "vf_nl_en"
 
@@ -282,8 +286,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
-       if (priv->mnl_socket)
-               mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+       if (priv->tcf_context)
+               mlx5_flow_tcf_context_destroy(priv->tcf_context);
        ret = mlx5_hrxq_ibv_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -401,6 +405,8 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
+       .allmulticast_enable = mlx5_allmulticast_enable,
+       .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
@@ -475,7 +481,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
        } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
                config->txqs_inline = tmp;
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
-               config->mps = !!tmp ? config->mps : 0;
+               config->mps = !!tmp;
        } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
                config->mpw_hdr_dseg = !!tmp;
        } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
@@ -488,6 +494,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                config->l3_vxlan_en = !!tmp;
        } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
                config->vf_nl_en = !!tmp;
+       } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
+               config->dv_flow_en = !!tmp;
        } else {
                DRV_LOG(WARNING, "%s: unknown parameter", key);
                rte_errno = EINVAL;
@@ -525,6 +533,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                MLX5_RX_VEC_EN,
                MLX5_L3_VXLAN_EN,
                MLX5_VF_NL_EN,
+               MLX5_DV_FLOW_EN,
                MLX5_REPRESENTOR,
                NULL,
        };
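
For context, a minimal sketch (not part of the patch) of how a "dv_flow_en=1" devargs string flows through the rte_kvargs API that mlx5_args() uses; the handler below is a stripped-down stand-in for mlx5_args_check(), and the key list, devargs string and return convention are illustrative only.

#include <stdlib.h>
#include <string.h>
#include <rte_kvargs.h>

#define MLX5_DV_FLOW_EN "dv_flow_en"	/* same key as defined above */

/* Stand-in for mlx5_args_check(): record the boolean value of the key. */
static int
example_check(const char *key, const char *val, void *opaque)
{
	int *dv_flow_en = opaque;

	if (strcmp(key, MLX5_DV_FLOW_EN) == 0)
		*dv_flow_en = !!strtoul(val, NULL, 0);
	return 0;
}

/* Parse a devargs string such as "dv_flow_en=1" and return the value. */
static int
example_parse(const char *devargs_str)
{
	const char *params[] = { MLX5_DV_FLOW_EN, NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse(devargs_str, params);
	int dv_flow_en = 0;

	if (kvlist == NULL)
		return -1;
	rte_kvargs_process(kvlist, MLX5_DV_FLOW_EN, example_check, &dv_flow_en);
	rte_kvargs_free(kvlist);
	return dv_flow_en;
}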
@@ -566,11 +575,13 @@ static struct rte_pci_driver mlx5_driver;
 static void *uar_base;
 
 static int
-find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
+find_lower_va_bound(const struct rte_memseg_list *msl,
                const struct rte_memseg *ms, void *arg)
 {
        void **addr = arg;
 
+       if (msl->external)
+               return 0;
        if (*addr == NULL)
                *addr = ms->addr;
        else
@@ -700,6 +711,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        struct mlx5dv_context dv_attr = { .comp_mask = 0 };
        struct mlx5_dev_config config = {
                .vf = !!vf,
+               .mps = MLX5_ARG_UNSET,
                .tx_vec_en = 1,
                .rx_vec_en = 1,
                .mpw_hdr_dseg = 0,
@@ -789,7 +801,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                DRV_LOG(DEBUG, "MPW isn't supported");
                mps = MLX5_MPW_DISABLED;
        }
-       config.mps = mps;
 #ifdef HAVE_IBV_MLX5_MOD_SWP
        if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
                swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
@@ -1033,13 +1044,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                       (1 << IBV_QPT_RAW_PACKET)));
        if (config.tso)
                config.tso_max_payload_sz = attr.tso_caps.max_tso;
-       if (config.mps && !mps) {
-               DRV_LOG(ERR,
-                       "multi-packet send not supported on this device"
-                       " (" MLX5_TXQ_MPW_EN ")");
-               err = ENOTSUP;
-               goto error;
-       }
+       /*
+        * MPW is disabled by default, while the Enhanced MPW is enabled
+        * by default.
+        */
+       if (config.mps == MLX5_ARG_UNSET)
+               config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
+                                                         MLX5_MPW_DISABLED;
+       else
+               config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
        DRV_LOG(INFO, "%sMPS is %s",
                config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
                config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
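
Spelling out the new default chosen by the hunk above (illustration only, reusing the driver's existing MLX5_ARG_UNSET/MLX5_MPW_* macros): when txq_mpw_en is not given, legacy MPW stays disabled and only Enhanced MPW is taken when the device reports it; txq_mpw_en=1 follows whatever the device supports, and txq_mpw_en=0 forces MPW off.

/* Illustration only: effective config.mps for each combination of the
 * txq_mpw_en devargs value (devarg) and the device capability (hw). */
static int
effective_mps(int devarg, int hw)
{
	if (devarg == MLX5_ARG_UNSET)	/* no txq_mpw_en devargs given */
		return hw == MLX5_MPW_ENHANCED ? MLX5_MPW_ENHANCED :
						 MLX5_MPW_DISABLED;
	return devarg ? hw : MLX5_MPW_DISABLED;
}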
@@ -1071,13 +1084,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                err = ENOMEM;
                goto error;
        }
-       if (priv->representor)
+       if (priv->representor) {
                eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+               eth_dev->data->representor_id = priv->representor_id;
+       }
        eth_dev->data->dev_private = priv;
        priv->dev_data = eth_dev->data;
        eth_dev->data->mac_addrs = priv->mac;
        eth_dev->device = dpdk_dev;
-       eth_dev->device->driver = &mlx5_driver.driver;
        err = mlx5_uar_init_primary(eth_dev);
        if (err) {
                err = rte_errno;
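
The representor_id assignment in the hunk above is what backs the commit subject: the ethdev iterator can now filter ports by a "representor" devargs property. A minimal sketch of that usage follows, assuming the experimental ethdev iterator API of the same release; the PCI address and representor number below are placeholders.

#include <stdio.h>
#include <rte_dev.h>
#include <rte_ethdev.h>

/* Sketch only: print the ports matching a representor devargs filter. */
static void
list_matching_ports(void)
{
	struct rte_dev_iterator iter;
	uint16_t port_id;

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, "0000:03:00.0,representor=0",
				     &iter)
		printf("matched port %u\n", port_id);
}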
@@ -1126,8 +1140,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
        if (vf && config.vf_nl_en)
                mlx5_nl_mac_addr_sync(eth_dev);
-       priv->mnl_socket = mlx5_nl_flow_socket_create();
-       if (!priv->mnl_socket) {
+       priv->tcf_context = mlx5_flow_tcf_context_create();
+       if (!priv->tcf_context) {
                err = -rte_errno;
                DRV_LOG(WARNING,
                        "flow rules relying on switch offloads will not be"
@@ -1142,16 +1156,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                        error.message =
                                "cannot retrieve network interface index";
                } else {
-                       err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
-                                               &error);
+                       err = mlx5_flow_tcf_init(priv->tcf_context,
+                                                ifindex, &error);
                }
                if (err) {
                        DRV_LOG(WARNING,
                                "flow rules relying on switch offloads will"
                                " not be supported: %s: %s",
                                error.message, strerror(rte_errno));
-                       mlx5_nl_flow_socket_destroy(priv->mnl_socket);
-                       priv->mnl_socket = NULL;
+                       mlx5_flow_tcf_context_destroy(priv->tcf_context);
+                       priv->tcf_context = NULL;
                }
        }
        TAILQ_INIT(&priv->flows);
@@ -1206,16 +1220,21 @@ error:
                        close(priv->nl_socket_route);
                if (priv->nl_socket_rdma >= 0)
                        close(priv->nl_socket_rdma);
-               if (priv->mnl_socket)
-                       mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+               if (priv->tcf_context)
+                       mlx5_flow_tcf_context_destroy(priv->tcf_context);
                if (own_domain_id)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                rte_free(priv);
+               if (eth_dev != NULL)
+                       eth_dev->data->dev_private = NULL;
        }
        if (pd)
                claim_zero(mlx5_glue->dealloc_pd(pd));
-       if (eth_dev)
+       if (eth_dev != NULL) {
+               /* mac_addrs must not be freed alone because part of dev_private */
+               eth_dev->data->mac_addrs = NULL;
                rte_eth_dev_release_port(eth_dev);
+       }
        if (ctx)
                claim_zero(mlx5_glue->close_device(ctx));
        assert(err > 0);
@@ -1435,8 +1454,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        if (!list[i].eth_dev)
                                continue;
                        mlx5_dev_close(list[i].eth_dev);
-                       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-                               rte_free(list[i].eth_dev->data->dev_private);
+                       /* mac_addrs must not be freed because in dev_private */
+                       list[i].eth_dev->data->mac_addrs = NULL;
                        claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
                }
                /* Restore original error. */
@@ -1484,6 +1503,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
        },
+       {
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+                              PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
+       },
        {
                .vendor_id = 0
        }