net/mlx5: fix devargs validation for multi-class probing
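
Probing an mlx5 device may span several classes (net, vdpa, regex,
compress) sharing a single devargs string, so a key the net PMD does
not recognize is not necessarily an error: it may belong to another
class driver. This patch therefore drops the per-key whitelist from
mlx5_args() and downgrades unknown keys from a hard failure to a
warning. For example, a two-class probe could look like the following
(event_mode is assumed here to be consumed by the vdpa class):

    dpdk-testpmd -a 0000:08:00.0,class=net:vdpa,rxq_cqe_comp_en=1,event_mode=1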
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 859637f..aa5f313 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -19,6 +19,7 @@
 #include <rte_rwlock.h>
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
+#include <rte_eal_paging.h>
 #include <rte_alarm.h>
 #include <rte_cycles.h>
 
@@ -36,7 +37,6 @@
 #include "mlx5_rx.h"
 #include "mlx5_tx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 #include "mlx5_flow.h"
 #include "mlx5_flow_os.h"
 #include "rte_pmd_mlx5.h"
 /* Device parameter to configure implicit registration of mempool memory. */
 #define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
 
+/* Device parameter to configure the delay drop when creating Rxqs. */
+#define MLX5_DELAY_DROP "delay_drop"
+
 /* Shared memory between primary and secondary processes. */
 struct mlx5_shared_data *mlx5_shared_data;
 
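
Note: the new delay_drop devarg is parsed later in this patch as a bit
mask, assuming MLX5_DELAY_DROP_STANDARD is bit 0 and
MLX5_DELAY_DROP_HAIRPIN is bit 1; a value of 3 would then enable delay
drop for both standard and hairpin Rx queues:

    -a 0000:08:00.0,delay_drop=3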
@@ -379,7 +382,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
        },
 };
 
-
 #define MLX5_FLOW_MIN_ID_POOL_SIZE 512
 #define MLX5_ID_GENERATION_ARRAY_FACTOR 16
 
@@ -451,7 +453,7 @@ mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
                mlx5_free(sh->aso_age_mng);
                return -1;
        }
-       rte_spinlock_init(&sh->aso_age_mng->resize_sl);
+       rte_rwlock_init(&sh->aso_age_mng->resize_rwl);
        rte_spinlock_init(&sh->aso_age_mng->free_sl);
        LIST_INIT(&sh->aso_age_mng->free);
        return 0;
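
The resize lock becomes a read/write lock so that ASO age lookups can
run concurrently and only contend with an actual resize of the pool
array. A minimal reader-side sketch, assuming lookups take shared
ownership (the pools field access is illustrative):

    rte_rwlock_read_lock(&sh->aso_age_mng->resize_rwl);
    pool = sh->aso_age_mng->pools[pool_idx]; /* resize cannot move it now */
    rte_rwlock_read_unlock(&sh->aso_age_mng->resize_rwl);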
@@ -546,8 +548,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
        uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
 
        LIST_REMOVE(mng, next);
-       claim_zero(mlx5_devx_cmd_destroy(mng->dm));
-       claim_zero(mlx5_os_umem_dereg(mng->umem));
+       mlx5_os_wrapped_mkey_destroy(&mng->wm);
        mlx5_free(mem);
 }
 
@@ -628,6 +629,7 @@ mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
                }
                if (sh->meter_aso_en) {
                        rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
+                       rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
                        LIST_INIT(&sh->mtrmng->pools_mng.meters);
                }
                sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
@@ -861,8 +863,7 @@ bool
 mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flex_parser_profiles *prf =
-                               &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+       struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
        return !!prf->obj;
 }
@@ -881,8 +882,7 @@ int
 mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flex_parser_profiles *prf =
-                               &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+       struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
        struct mlx5_devx_graph_node_attr node = {
                .modify_field_select = 0,
        };
@@ -916,7 +916,7 @@ mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
         * start after the common header, which has the length of a DW (u32).
         */
        node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
-       prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);
+       prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
        if (!prf->obj) {
                DRV_LOG(ERR, "Failed to create flex parser node object.");
                return (rte_errno == 0) ? -ENODEV : -rte_errno;
@@ -945,8 +945,7 @@ static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flex_parser_profiles *prf =
-                               &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+       struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
        if (prf->obj)
                mlx5_devx_cmd_destroy(prf->obj);
@@ -969,190 +968,54 @@ mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
        return sw_parsing_offloads;
 }
 
-/*
- * Allocate Rx and Tx UARs in robust fashion.
- * This routine handles the following UAR allocation issues:
- *
- *  - tries to allocate the UAR with the most appropriate memory
- *    mapping type from the ones supported by the host
- *
- *  - tries to allocate the UAR with non-NULL base address
- *    OFED 5.0.x and Upstream rdma_core before v29 returned the NULL as
- *    UAR base address if UAR was not the first object in the UAR page.
- *    It caused the PMD failure and we should try to get another UAR
- *    till we get the first one with non-NULL base address returned.
- */
-static int
-mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
-                    const struct mlx5_dev_config *config)
+uint32_t
+mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
 {
-       uint32_t uar_mapping, retry;
-       int err = 0;
-       void *base_addr;
-
-       for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               /* Control the mapping type according to the settings. */
-               uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ?
-                             MLX5DV_UAR_ALLOC_TYPE_NC :
-                             MLX5DV_UAR_ALLOC_TYPE_BF;
-#else
-               RTE_SET_USED(config);
-               /*
-                * It seems we have no way to control the memory mapping type
-                * for the UAR, the default "Write-Combining" type is supposed.
-                * The UAR initialization on queue creation queries the
-                * actual mapping type done by Verbs/kernel and setups the
-                * PMD datapath accordingly.
-                */
-               uar_mapping = 0;
-#endif
-               sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               if (!sh->tx_uar &&
-                   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
-                       if (config->dbnc == MLX5_TXDB_CACHED ||
-                           config->dbnc == MLX5_TXDB_HEURISTIC)
-                               DRV_LOG(WARNING, "Devarg tx_db_nc setting "
-                                                "is not supported by DevX");
-                       /*
-                        * In some environments like virtual machine
-                        * the Write Combining mapped might be not supported
-                        * and UAR allocation fails. We try "Non-Cached"
-                        * mapping for the case. The tx_burst routines take
-                        * the UAR mapping type into account on UAR setup
-                        * on queue creation.
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
-                       sh->tx_uar = mlx5_glue->devx_alloc_uar
-                                                       (sh->ctx, uar_mapping);
-               } else if (!sh->tx_uar &&
-                          uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
-                       if (config->dbnc == MLX5_TXDB_NCACHED)
-                               DRV_LOG(WARNING, "Devarg tx_db_nc settings "
-                                                "is not supported by DevX");
-                       /*
-                        * If Verbs/kernel does not support "Non-Cached"
-                        * try the "Write-Combining".
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
-                       sh->tx_uar = mlx5_glue->devx_alloc_uar
-                                                       (sh->ctx, uar_mapping);
-               }
-#endif
-               if (!sh->tx_uar) {
-                       DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)");
-                       err = ENOMEM;
-                       goto exit;
-               }
-               base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
-               if (base_addr)
-                       break;
-               /*
-                * The UARs are allocated by rdma_core within the
-                * IB device context, on context closure all UARs
-                * will be freed, should be no memory/object leakage.
-                */
-               DRV_LOG(DEBUG, "Retrying to allocate Tx DevX UAR");
-               sh->tx_uar = NULL;
-       }
-       /* Check whether we finally succeeded with valid UAR allocation. */
-       if (!sh->tx_uar) {
-               DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)");
-               err = ENOMEM;
-               goto exit;
-       }
-       for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
-               uar_mapping = 0;
-               sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
-                                                       (sh->ctx, uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               if (!sh->devx_rx_uar &&
-                   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
-                       /*
-                        * Rx UAR is used to control interrupts only,
-                        * should be no datapath noticeable impact,
-                        * can try "Non-Cached" mapping safely.
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
-                       sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
-                                                       (sh->ctx, uar_mapping);
-               }
-#endif
-               if (!sh->devx_rx_uar) {
-                       DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)");
-                       err = ENOMEM;
-                       goto exit;
-               }
-               base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
-               if (base_addr)
-                       break;
-               /*
-                * The UARs are allocated by rdma_core within the
-                * IB device context, on context closure all UARs
-                * will be freed, should be no memory/object leakage.
-                */
-               DRV_LOG(DEBUG, "Retrying to allocate Rx DevX UAR");
-               sh->devx_rx_uar = NULL;
-       }
-       /* Check whether we finally succeeded with valid UAR allocation. */
-       if (!sh->devx_rx_uar) {
-               DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)");
-               err = ENOMEM;
-       }
-exit:
-       return err;
+       uint32_t tn_offloads = 0;
+
+       if (attr->tunnel_stateless_vxlan)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
+       if (attr->tunnel_stateless_gre)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
+       if (attr->tunnel_stateless_geneve_rx)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
+       return tn_offloads;
 }
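
mlx5_get_supported_tunneling_offloads() condenses the HCA capability
bits into a single bitmap. A minimal sketch of a consumer, assuming the
caller stores the result and tests individual capability bits:

    uint32_t tunnel_en = mlx5_get_supported_tunneling_offloads(&attr);

    if (tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
            DRV_LOG(DEBUG, "VXLAN stateless offloading is supported.");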
 
-/**
- * Unregister the mempool from the protection domain.
- *
- * @param sh
- *   Pointer to the device shared context.
- * @param mp
- *   Mempool being unregistered.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
-                                      struct rte_mempool *mp)
+/* Allocate and fill all fields of the Tx and Rx UAR structures. */
+static int
+mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_mp_id mp_id;
+       int ret;
 
-       mlx5_mp_id_init(&mp_id, 0);
-       if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
-               DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
-                       mp->name, sh->pd, rte_strerror(rte_errno));
+       ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
+               return -rte_errno;
+       }
+       MLX5_ASSERT(sh->tx_uar.obj);
+       MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
+       ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
+               mlx5_devx_uar_release(&sh->tx_uar);
+               return -rte_errno;
+       }
+       MLX5_ASSERT(sh->rx_uar.obj);
+       MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
+       return 0;
 }
 
-/**
- * rte_mempool_walk() callback to register mempools
- * for the protection domain.
- *
- * @param mp
- *   The mempool being walked.
- * @param arg
- *   Pointer to the device shared context.
- */
 static void
-mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_dev_ctx_shared *sh = arg;
-       struct mlx5_mp_id mp_id;
-       int ret;
-
-       mlx5_mp_id_init(&mp_id, 0);
-       ret = mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp, &mp_id);
-       if (ret < 0 && rte_errno != EEXIST)
-               DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
-                       mp->name, sh->pd, rte_strerror(rte_errno));
+       mlx5_devx_uar_release(&sh->rx_uar);
+       mlx5_devx_uar_release(&sh->tx_uar);
 }
 
 /**
- * rte_mempool_walk() callback to unregister mempools
- * from the protection domain.
+ * rte_mempool_walk() callback to unregister Rx mempools.
+ * It is used when implicit mempool registration is disabled.
  *
  * @param mp
  *   The mempool being walked.
@@ -1160,41 +1023,11 @@ mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
  *   Pointer to the device shared context.
  */
 static void
-mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
-{
-       mlx5_dev_ctx_shared_mempool_unregister
-                               ((struct mlx5_dev_ctx_shared *)arg, mp);
-}
-
-/**
- * Mempool life cycle callback for Ethernet devices.
- *
- * @param event
- *   Mempool life cycle event.
- * @param mp
- *   Associated mempool.
- * @param arg
- *   Pointer to a device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
-                                    struct rte_mempool *mp, void *arg)
+mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
 {
        struct mlx5_dev_ctx_shared *sh = arg;
-       struct mlx5_mp_id mp_id;
-
-       switch (event) {
-       case RTE_MEMPOOL_EVENT_READY:
-               mlx5_mp_id_init(&mp_id, 0);
-               if (mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp,
-                                            &mp_id) < 0)
-                       DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
-                               mp->name, sh->pd, rte_strerror(rte_errno));
-               break;
-       case RTE_MEMPOOL_EVENT_DESTROY:
-               mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
-               break;
-       }
+
+       mlx5_dev_mempool_unregister(sh->cdev, mp);
 }
 
 /**
@@ -1215,7 +1048,7 @@ mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
        struct mlx5_dev_ctx_shared *sh = arg;
 
        if (event == RTE_MEMPOOL_EVENT_DESTROY)
-               mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+               mlx5_dev_mempool_unregister(sh->cdev, mp);
 }
 
 int
@@ -1226,19 +1059,73 @@ mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
        int ret;
 
        /* Check if we only need to track Rx mempool destruction. */
-       if (!priv->config.mr_mempool_reg_en) {
+       if (!sh->cdev->config.mr_mempool_reg_en) {
                ret = rte_mempool_event_callback_register
                                (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
                return ret == 0 || rte_errno == EEXIST ? 0 : ret;
        }
-       /* Callback for this shared context may be already registered. */
-       ret = rte_mempool_event_callback_register
-                               (mlx5_dev_ctx_shared_mempool_event_cb, sh);
-       if (ret != 0 && rte_errno != EEXIST)
-               return ret;
-       /* Register mempools only once for this shared context. */
-       if (ret == 0)
-               rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
+       return mlx5_dev_mempool_subscribe(sh->cdev);
+}
+
+/**
+ * Set up multiple TISs with different affinities, according to
+ * the number of bonding ports.
+ *
+ * @param sh
+ *   Pointer to the shared device context.
+ *
+ * @return
+ *   Zero on success, -1 otherwise.
+ */
+static int
+mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
+{
+       int i;
+       struct mlx5_devx_lag_context lag_ctx = { 0 };
+       struct mlx5_devx_tis_attr tis_attr = { 0 };
+
+       tis_attr.transport_domain = sh->td->id;
+       if (sh->bond.n_port) {
+               if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
+                       sh->lag.tx_remap_affinity[0] =
+                               lag_ctx.tx_remap_affinity_1;
+                       sh->lag.tx_remap_affinity[1] =
+                               lag_ctx.tx_remap_affinity_2;
+                       sh->lag.affinity_mode = lag_ctx.port_select_mode;
+               } else {
+                       DRV_LOG(ERR, "Failed to query lag affinity.");
+                       return -1;
+               }
+               if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
+                       for (i = 0; i < sh->bond.n_port; i++) {
+                               tis_attr.lag_tx_port_affinity =
+                                       MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
+                                                       sh->bond.n_port);
+                               sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx,
+                                               &tis_attr);
+                               if (!sh->tis[i]) {
+                                       DRV_LOG(ERR, "Failed to create TIS %d/%d for bonding device"
+                                               " %s.", i, sh->bond.n_port,
+                                               sh->ibdev_name);
+                                       return -1;
+                               }
+                       }
+                       DRV_LOG(DEBUG, "LAG number of ports: %d, affinity_1 & 2: pf%d & pf%d.",
+                               sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
+                               lag_ctx.tx_remap_affinity_2);
+                       return 0;
+               }
+               if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
+                       DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
+                                       sh->ibdev_name);
+       }
+       tis_attr.lag_tx_port_affinity = 0;
+       sh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
+       if (!sh->tis[0]) {
+               DRV_LOG(ERR, "Failed to create TIS 0 for device"
+                       " %s.", sh->ibdev_name);
+               return -1;
+       }
        return 0;
 }
 
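
In TIS affinity mode one TIS is created per bonded port, and
MLX5_IFC_LAG_MAP_TIS_AFFINITY() pins each TIS to a physical port. A
sketch of the mapping assumed here (illustrative only, not the macro's
actual definition):

    /* TIS index i gets the 1-based port affinity (i % n_port) + 1,
     * spreading Tx queues round-robin across the bonded ports. */
    static inline uint32_t
    lag_tis_affinity(int i, int n_port)
    {
            return (uint32_t)(i % n_port) + 1;
    }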
@@ -1264,12 +1151,11 @@ mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
  */
 struct mlx5_dev_ctx_shared *
 mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
-                          const struct mlx5_dev_config *config)
+                         const struct mlx5_dev_config *config)
 {
        struct mlx5_dev_ctx_shared *sh;
        int err = 0;
        uint32_t i;
-       struct mlx5_devx_tis_attr tis_attr = { 0 };
 
        MLX5_ASSERT(spawn);
        /* Secondary process should not create the shared context. */
@@ -1277,8 +1163,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
        /* Search for IB context by device name. */
        LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
-               if (!strcmp(sh->ibdev_name,
-                       mlx5_os_get_dev_device_name(spawn->phys_dev))) {
+               if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
                        sh->refcnt++;
                        goto exit;
                }
@@ -1295,13 +1180,13 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                rte_errno  = ENOMEM;
                goto exit;
        }
-       sh->numa_node = spawn->numa_node;
+       pthread_mutex_init(&sh->txpp.mutex, NULL);
+       sh->numa_node = spawn->cdev->dev->numa_node;
+       sh->cdev = spawn->cdev;
+       sh->devx = sh->cdev->config.devx;
        if (spawn->bond_info)
                sh->bond = *spawn->bond_info;
-       err = mlx5_os_open_device(spawn, config, sh);
-       if (!sh->ctx)
-               goto error;
-       err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
+       err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
        if (err) {
                DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
                goto error;
@@ -1309,9 +1194,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        sh->refcnt = 1;
        sh->max_port = spawn->max_port;
        sh->reclaim_mode = config->reclaim_mode;
-       strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
+       strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
                sizeof(sh->ibdev_name) - 1);
-       strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
+       strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
                sizeof(sh->ibdev_path) - 1);
        /*
         * Setting port_id to max unallowed value means
@@ -1322,64 +1207,29 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
        }
-       sh->pd = mlx5_os_alloc_pd(sh->ctx);
-       if (sh->pd == NULL) {
-               DRV_LOG(ERR, "PD allocation failure");
-               err = ENOMEM;
-               goto error;
-       }
        if (sh->devx) {
-               err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
-               if (err) {
-                       DRV_LOG(ERR, "Fail to extract pdn from PD");
-                       goto error;
-               }
-               sh->td = mlx5_devx_cmd_create_td(sh->ctx);
+               sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
                if (!sh->td) {
                        DRV_LOG(ERR, "TD allocation failure");
                        err = ENOMEM;
                        goto error;
                }
-               tis_attr.transport_domain = sh->td->id;
-               sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
-               if (!sh->tis) {
+               if (mlx5_setup_tis(sh)) {
                        DRV_LOG(ERR, "TIS allocation failure");
                        err = ENOMEM;
                        goto error;
                }
-               err = mlx5_alloc_rxtx_uars(sh, config);
+               err = mlx5_rxtx_uars_prepare(sh);
                if (err)
                        goto error;
-               MLX5_ASSERT(sh->tx_uar);
-               MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));
-
-               MLX5_ASSERT(sh->devx_rx_uar);
-               MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
-       }
 #ifndef RTE_ARCH_64
-       /* Initialize UAR access locks for 32bit implementations. */
-       rte_spinlock_init(&sh->uar_lock_cq);
-       for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
-               rte_spinlock_init(&sh->uar_lock[i]);
+       } else {
+               /* Initialize UAR access locks for 32bit implementations. */
+               rte_spinlock_init(&sh->uar_lock_cq);
+               for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+                       rte_spinlock_init(&sh->uar_lock[i]);
 #endif
-       /*
-        * Once the device is added to the list of memory event
-        * callback, its global MR cache table cannot be expanded
-        * on the fly because of deadlock. If it overflows, lookup
-        * should be done by searching MR list linearly, which is slow.
-        *
-        * At this point the device is not added to the memory
-        * event list yet, context is just being created.
-        */
-       err = mlx5_mr_btree_init(&sh->share_cache.cache,
-                                MLX5_MR_BTREE_CACHE_N * 2,
-                                sh->numa_node);
-       if (err) {
-               err = rte_errno;
-               goto error;
        }
-       mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
-                             &sh->share_cache.dereg_mr_cb);
        mlx5_os_dev_shared_handler_install(sh);
        if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
                err = mlx5_flow_os_init_workspace_once();
@@ -1389,11 +1239,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        mlx5_flow_aging_init(sh);
        mlx5_flow_counters_mng_init(sh);
        mlx5_flow_ipool_create(sh, config);
-       /* Add device to memory callback list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
-                        sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
        rte_spinlock_init(&sh->geneve_tlv_opt_sl);
@@ -1404,20 +1249,14 @@ error:
        pthread_mutex_destroy(&sh->txpp.mutex);
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        MLX5_ASSERT(sh);
-       if (sh->share_cache.cache.table)
-               mlx5_mr_btree_free(&sh->share_cache.cache);
-       if (sh->tis)
-               claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
-       if (sh->devx_rx_uar)
-               mlx5_glue->devx_free_uar(sh->devx_rx_uar);
-       if (sh->tx_uar)
-               mlx5_glue->devx_free_uar(sh->tx_uar);
-       if (sh->pd)
-               claim_zero(mlx5_os_dealloc_pd(sh->pd));
-       if (sh->ctx)
-               claim_zero(mlx5_glue->close_device(sh->ctx));
+       i = 0;
+       do {
+               if (sh->tis[i])
+                       claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+       } while (++i < (uint32_t)sh->bond.n_port);
+       mlx5_rxtx_uars_release(sh);
        mlx5_free(sh);
        MLX5_ASSERT(err > 0);
        rte_errno = err;
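
The do/while in this error path (and the matching loop in
mlx5_free_shared_dev_ctx() below) intentionally runs at least once:
when bonding is not configured (sh->bond.n_port == 0) the device still
owns exactly one TIS in sh->tis[0]. An equivalent explicit form:

    unsigned int n = sh->bond.n_port ? (unsigned int)sh->bond.n_port : 1;

    for (i = 0; i < n; i++)
            if (sh->tis[i])
                    claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));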
@@ -1435,6 +1274,7 @@ void
 mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 {
        int ret;
+       int i = 0;
 
        pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
@@ -1457,26 +1297,25 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
        if (--sh->refcnt)
                goto exit;
        /* Stop watching for mempool events and unregister all mempools. */
-       ret = rte_mempool_event_callback_unregister
-                               (mlx5_dev_ctx_shared_mempool_event_cb, sh);
-       if (ret < 0 && rte_errno == ENOENT)
+       if (!sh->cdev->config.mr_mempool_reg_en) {
                ret = rte_mempool_event_callback_unregister
                                (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
-       if (ret == 0)
-               rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
-                                sh);
-       /* Remove from memory callback device list. */
-       rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-       LIST_REMOVE(sh, mem_event_cb);
-       rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
-       /* Release created Memory Regions. */
-       mlx5_mr_release_cache(&sh->share_cache);
+               if (ret == 0)
+                       rte_mempool_walk
+                            (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
+       }
        /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
-       /* Release flow workspaces objects on the last device. */
-       if (LIST_EMPTY(&mlx5_dev_ctx_list))
+       /* Release resources on the last device removal. */
+       if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
+               mlx5_os_net_cleanup();
                mlx5_flow_os_release_workspace();
+       }
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+       if (sh->flex_parsers_dv) {
+               mlx5_list_destroy(sh->flex_parsers_dv);
+               sh->flex_parsers_dv = NULL;
+       }
        /*
         *  Ensure there is no async event handler installed.
         *  Only primary process handles async device events.
@@ -1490,20 +1329,13 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_aso_flow_mtrs_mng_close(sh);
        mlx5_flow_ipool_destroy(sh);
        mlx5_os_dev_shared_handler_uninstall(sh);
-       if (sh->tx_uar) {
-               mlx5_glue->devx_free_uar(sh->tx_uar);
-               sh->tx_uar = NULL;
-       }
-       if (sh->pd)
-               claim_zero(mlx5_os_dealloc_pd(sh->pd));
-       if (sh->tis)
-               claim_zero(mlx5_devx_cmd_destroy(sh->tis));
+       mlx5_rxtx_uars_release(sh);
+       do {
+               if (sh->tis[i])
+                       claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+       } while (++i < sh->bond.n_port);
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
-       if (sh->devx_rx_uar)
-               mlx5_glue->devx_free_uar(sh->devx_rx_uar);
-       if (sh->ctx)
-               claim_zero(mlx5_glue->close_device(sh->ctx));
        MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
        pthread_mutex_destroy(&sh->txpp.mutex);
        mlx5_free(sh);
@@ -1623,10 +1455,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
                         struct rte_eth_udp_tunnel *udp_tunnel)
 {
        MLX5_ASSERT(udp_tunnel != NULL);
-       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+       if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
            udp_tunnel->udp_port == 4789)
                return 0;
-       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+       if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
            udp_tunnel->udp_port == 4790)
                return 0;
        return -ENOTSUP;
@@ -1653,8 +1485,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
         * UAR register table follows the process private structure. BlueFlame
         * registers for Tx queues are stored in the table.
         */
-       ppriv_size =
-               sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
+       ppriv_size = sizeof(struct mlx5_proc_priv) +
+                    priv->txqs_n * sizeof(struct mlx5_uar_data);
        ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
                            RTE_CACHE_LINE_SIZE, dev->device->numa_node);
        if (!ppriv) {
@@ -1663,6 +1495,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
        }
        ppriv->uar_table_sz = priv->txqs_n;
        dev->process_private = ppriv;
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               priv->sh->pppriv = ppriv;
        return 0;
 }
 
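
The per-process private area is sized so that the UAR register table,
one struct mlx5_uar_data entry per Tx queue, follows the fixed-size
header directly. A sketch of the assumed layout (illustrative; the real
definition lives in the driver headers):

    struct mlx5_proc_priv {
            size_t uar_table_sz;              /* number of entries */
            struct mlx5_uar_data uar_table[]; /* txqs_n entries follow */
    };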
@@ -1709,8 +1543,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                return 0;
        DRV_LOG(DEBUG, "port %u closing device \"%s\"",
                dev->data->port_id,
-               ((priv->sh->ctx != NULL) ?
-               mlx5_os_get_ctx_device_name(priv->sh->ctx) : ""));
+               ((priv->sh->cdev->ctx != NULL) ?
+               mlx5_os_get_ctx_device_name(priv->sh->cdev->ctx) : ""));
        /*
         * If default mreg copy action is removed at the stop stage,
         * the search will return none and nothing will be done anymore.
@@ -1732,18 +1566,15 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        mlx5_mp_os_req_stop_rxtx(dev);
        /* Free the eCPRI flex parser resource. */
        mlx5_flex_parser_ecpri_release(dev);
-       if (priv->rxqs != NULL) {
+       mlx5_flex_item_port_cleanup(dev);
+       if (priv->rxq_privs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                rte_delay_us_sleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i)
                        mlx5_rxq_release(dev, i);
                priv->rxqs_n = 0;
-               priv->rxqs = NULL;
-       }
-       if (priv->representor) {
-               /* Each representor has a dedicated interrupts handler */
-               mlx5_free(dev->intr_handle);
-               dev->intr_handle = NULL;
+               mlx5_free(priv->rxq_privs);
+               priv->rxq_privs = NULL;
        }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
@@ -2020,7 +1851,10 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
        signed long tmp;
 
        /* No-op, port representors are processed in mlx5_dev_spawn(). */
-       if (!strcmp(MLX5_DRIVER_KEY, key) || !strcmp(MLX5_REPRESENTOR, key))
+       if (!strcmp(MLX5_DRIVER_KEY, key) || !strcmp(MLX5_REPRESENTOR, key) ||
+           !strcmp(MLX5_SYS_MEM_EN, key) || !strcmp(MLX5_TX_DB_NC, key) ||
+           !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) ||
+           !strcmp(MLX5_MR_EXT_MEMSEG_EN, key))
                return 0;
        errno = 0;
        tmp = strtol(val, NULL, 0);
@@ -2073,16 +1907,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
                config->mps = !!tmp;
-       } else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
-               if (tmp != MLX5_TXDB_CACHED &&
-                   tmp != MLX5_TXDB_NCACHED &&
-                   tmp != MLX5_TXDB_HEURISTIC) {
-                       DRV_LOG(ERR, "invalid Tx doorbell "
-                                    "mapping parameter");
-                       rte_errno = EINVAL;
-                       return -rte_errno;
-               }
-               config->dbnc = tmp;
        } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
@@ -2126,8 +1950,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                        config->dv_miss_info = 1;
        } else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
                config->lacp_by_user = !!tmp;
-       } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
-               config->mr_ext_memseg_en = !!tmp;
        } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
                config->max_dump_files_num = tmp;
        } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
@@ -2145,18 +1967,17 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                        return -rte_errno;
                }
                config->reclaim_mode = tmp;
-       } else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
-               config->sys_mem_en = !!tmp;
        } else if (strcmp(MLX5_DECAP_EN, key) == 0) {
                config->decap_en = !!tmp;
        } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
                config->allow_duplicate_pattern = !!tmp;
-       } else if (strcmp(MLX5_MR_MEMPOOL_REG_EN, key) == 0) {
-               config->mr_mempool_reg_en = !!tmp;
+       } else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
+               config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
+               config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
        } else {
-               DRV_LOG(WARNING, "%s: unknown parameter", key);
-               rte_errno = EINVAL;
-               return -rte_errno;
+               DRV_LOG(WARNING,
+                       "%s: unknown parameter, maybe it's for another class.",
+                       key);
        }
        return 0;
 }
@@ -2175,74 +1996,25 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 int
 mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 {
-       const char **params = (const char *[]){
-               MLX5_DRIVER_KEY,
-               MLX5_RXQ_CQE_COMP_EN,
-               MLX5_RXQ_PKT_PAD_EN,
-               MLX5_RX_MPRQ_EN,
-               MLX5_RX_MPRQ_LOG_STRIDE_NUM,
-               MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
-               MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
-               MLX5_RXQS_MIN_MPRQ,
-               MLX5_TXQ_INLINE,
-               MLX5_TXQ_INLINE_MIN,
-               MLX5_TXQ_INLINE_MAX,
-               MLX5_TXQ_INLINE_MPW,
-               MLX5_TXQS_MIN_INLINE,
-               MLX5_TXQS_MAX_VEC,
-               MLX5_TXQ_MPW_EN,
-               MLX5_TXQ_MPW_HDR_DSEG_EN,
-               MLX5_TXQ_MAX_INLINE_LEN,
-               MLX5_TX_DB_NC,
-               MLX5_TX_PP,
-               MLX5_TX_SKEW,
-               MLX5_TX_VEC_EN,
-               MLX5_RX_VEC_EN,
-               MLX5_L3_VXLAN_EN,
-               MLX5_VF_NL_EN,
-               MLX5_DV_ESW_EN,
-               MLX5_DV_FLOW_EN,
-               MLX5_DV_XMETA_EN,
-               MLX5_LACP_BY_USER,
-               MLX5_MR_EXT_MEMSEG_EN,
-               MLX5_REPRESENTOR,
-               MLX5_MAX_DUMP_FILES_NUM,
-               MLX5_LRO_TIMEOUT_USEC,
-               RTE_DEVARGS_KEY_CLASS,
-               MLX5_HP_BUF_SIZE,
-               MLX5_RECLAIM_MEM,
-               MLX5_SYS_MEM_EN,
-               MLX5_DECAP_EN,
-               MLX5_ALLOW_DUPLICATE_PATTERN,
-               MLX5_MR_MEMPOOL_REG_EN,
-               NULL,
-       };
        struct rte_kvargs *kvlist;
        int ret = 0;
-       int i;
 
        if (devargs == NULL)
                return 0;
-       /* Following UGLY cast is done to pass checkpatch. */
-       kvlist = rte_kvargs_parse(devargs->args, params);
+       kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        /* Process parameters. */
-       for (i = 0; (params[i] != NULL); ++i) {
-               if (rte_kvargs_count(kvlist, params[i])) {
-                       ret = rte_kvargs_process(kvlist, params[i],
-                                                mlx5_args_check, config);
-                       if (ret) {
-                               rte_errno = EINVAL;
-                               rte_kvargs_free(kvlist);
-                               return -rte_errno;
-                       }
-               }
+       ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
+       if (ret) {
+               rte_errno = EINVAL;
+               ret = -rte_errno;
        }
        rte_kvargs_free(kvlist);
-       return 0;
+       return ret;
 }
 
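
mlx5_args() now leans on two kvargs behaviors visible above: passing
NULL as the valid-keys list to rte_kvargs_parse() accepts any key, and
passing NULL as the key to rte_kvargs_process() invokes the callback on
every parsed pair. A standalone sketch of the same pattern (handler and
opaque are placeholders):

    struct rte_kvargs *kvlist = rte_kvargs_parse("a=1,b=2", NULL);

    if (kvlist != NULL) {
            /* handler is called once for "a" and once for "b". */
            rte_kvargs_process(kvlist, NULL, handler, opaque);
            rte_kvargs_free(kvlist);
    }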
 /**
@@ -2542,19 +2314,19 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
  *
  * This function removes all Ethernet devices belonging to a given device.
  *
- * @param[in] dev
+ * @param[in] cdev
  *   Pointer to the mlx5 common device.
  *
  * @return
  *   0 on success, the function cannot fail.
  */
 int
-mlx5_net_remove(struct rte_device *dev)
+mlx5_net_remove(struct mlx5_common_device *cdev)
 {
        uint16_t port_id;
        int ret = 0;
 
-       RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
+       RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) {
                /*
                 * mlx5_dev_close() is not registered to secondary process,
                 * call the close function explicitly for secondary process.
@@ -2651,8 +2423,6 @@ static struct mlx5_class_driver mlx5_net_driver = {
        .id_table = mlx5_pci_id_map,
        .probe = mlx5_os_net_probe,
        .remove = mlx5_net_remove,
-       .dma_map = mlx5_net_dma_map,
-       .dma_unmap = mlx5_net_dma_unmap,
        .probe_again = 1,
        .intr_lsc = 1,
        .intr_rmv = 1,