net/mlx5: rearrange device attribute structure
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 97cf2d9..7487b1f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -19,6 +19,7 @@
 #include <rte_rwlock.h>
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
+#include <rte_eal_paging.h>
 #include <rte_alarm.h>
 #include <rte_cycles.h>
 
 /* Device parameter to configure implicit registration of mempool memory. */
 #define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
 
+/* Device parameter to configure the delay drop when creating Rxqs. */
+#define MLX5_DELAY_DROP "delay_drop"
+
 /* Shared memory between primary and secondary processes. */
 struct mlx5_shared_data *mlx5_shared_data;
 
@@ -378,7 +382,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
        },
 };
 
-
 #define MLX5_FLOW_MIN_ID_POOL_SIZE 512
 #define MLX5_ID_GENERATION_ARRAY_FACTOR 16
 
@@ -450,7 +453,7 @@ mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
                mlx5_free(sh->aso_age_mng);
                return -1;
        }
-       rte_spinlock_init(&sh->aso_age_mng->resize_sl);
+       rte_rwlock_init(&sh->aso_age_mng->resize_rwl);
        rte_spinlock_init(&sh->aso_age_mng->free_sl);
        LIST_INIT(&sh->aso_age_mng->free);
        return 0;
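
The hunk above converts the resize protection of the ASO age pool manager from a spinlock to a reader/writer lock, so concurrent lookups no longer serialize against each other. Below is a minimal sketch of the resulting read-mostly pattern; only the resize_rwl field name comes from this patch, every other name is illustrative.

```c
#include <rte_rwlock.h>

/* Illustrative pool container; only resize_rwl is taken from the patch. */
struct age_pools_mng {
	rte_rwlock_t resize_rwl;
	void **pools;
	unsigned int n_pools;
};

/* Datapath lookup: many readers may hold the lock in parallel. */
static void *
age_pool_lookup(struct age_pools_mng *mng, unsigned int idx)
{
	void *pool = NULL;

	rte_rwlock_read_lock(&mng->resize_rwl);
	if (idx < mng->n_pools)
		pool = mng->pools[idx];
	rte_rwlock_read_unlock(&mng->resize_rwl);
	return pool;
}

/* Resize: the rare writer excludes all readers while swapping the array. */
static void
age_pools_resize(struct age_pools_mng *mng, void **new_pools, unsigned int n)
{
	rte_rwlock_write_lock(&mng->resize_rwl);
	mng->pools = new_pools;
	mng->n_pools = n;
	rte_rwlock_write_unlock(&mng->resize_rwl);
}
```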
@@ -510,6 +513,46 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
        }
 }
 
+/**
+ * Detect and configure the DV flow counter mode.
+ *
+ * @param dev
+ *   Pointer to the rte_eth_dev structure.
+ */
+void
+mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+       bool fallback;
+
+#ifndef HAVE_IBV_DEVX_ASYNC
+       fallback = true;
+#else
+       fallback = false;
+       if (!sh->cdev->config.devx || !priv->config.dv_flow_en ||
+           !hca_attr->flow_counters_dump ||
+           !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
+           (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
+               fallback = true;
+#endif
+       if (fallback)
+               DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
+                       "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
+                       hca_attr->flow_counters_dump,
+                       hca_attr->flow_counter_bulk_alloc_bitmap);
+       /* Initialize fallback mode only on the port that initializes sh. */
+       if (sh->refcnt == 1)
+               sh->cmng.counter_fallback = fallback;
+       else if (fallback != sh->cmng.counter_fallback)
+               DRV_LOG(WARNING, "Port %d in sh has a different fallback mode "
+                       "than other ports: %d.", PORT_ID(priv), fallback);
+#endif
+}
+
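
Flattened for readability, the decision in mlx5_flow_counter_mode_config() amounts to the predicate below. This is a restating sketch, not PMD code: the struct is trimmed to the fields actually tested, and the runtime counter-offset probe is omitted.

```c
#include <stdbool.h>
#include <stdint.h>

/* Capability subset checked by the fallback decision (illustrative type). */
struct counter_caps {
	bool devx;
	bool dv_flow_en;
	bool flow_counters_dump;
	uint8_t flow_counter_bulk_alloc_bitmap;
};

static bool
counter_fallback_needed(const struct counter_caps *caps)
{
#ifndef HAVE_IBV_DEVX_ASYNC
	return true; /* Without async DevX, full counter management is unusable. */
#else
	return !caps->devx || !caps->dv_flow_en ||
	       !caps->flow_counters_dump ||
	       !(caps->flow_counter_bulk_alloc_bitmap & 0x4); /* bit 2 required */
#endif
}
```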
 /**
  * Initialize the counters management structure.
  *
@@ -519,7 +562,6 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
 static void
 mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
        int i;
 
        memset(&sh->cmng, 0, sizeof(sh->cmng));
@@ -532,10 +574,6 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
                TAILQ_INIT(&sh->cmng.counters[i]);
                rte_spinlock_init(&sh->cmng.csl[i]);
        }
-       if (sh->devx && !haswell_broadwell_cpu) {
-               sh->cmng.relaxed_ordering_write = attr->relaxed_ordering_write;
-               sh->cmng.relaxed_ordering_read = attr->relaxed_ordering_read;
-       }
 }
 
 /**
@@ -550,8 +588,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
        uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
 
        LIST_REMOVE(mng, next);
-       claim_zero(mlx5_devx_cmd_destroy(mng->dm));
-       claim_zero(mlx5_os_umem_dereg(mng->umem));
+       mlx5_os_wrapped_mkey_destroy(&mng->wm);
        mlx5_free(mem);
 }
 
@@ -632,6 +669,7 @@ mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
                }
                if (sh->meter_aso_en) {
                        rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
+                       rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
                        LIST_INIT(&sh->mtrmng->pools_mng.meters);
                }
                sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
@@ -865,8 +903,7 @@ bool
 mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flex_parser_profiles *prf =
-                               &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+       struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
        return !!prf->obj;
 }
@@ -885,15 +922,14 @@ int
 mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flex_parser_profiles *prf =
-                               &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+       struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
        struct mlx5_devx_graph_node_attr node = {
                .modify_field_select = 0,
        };
        uint32_t ids[8];
        int ret;
 
-       if (!priv->config.hca_attr.parse_graph_flex_node) {
+       if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
                DRV_LOG(ERR, "Dynamic flex parser is not supported "
                        "for device %s.", priv->dev_data->name);
                return -ENOTSUP;
@@ -949,8 +985,7 @@ static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flex_parser_profiles *prf =
-                               &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+       struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
        if (prf->obj)
                mlx5_devx_cmd_destroy(prf->obj);
@@ -987,143 +1022,35 @@ mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
        return tn_offloads;
 }
 
-/*
- * Allocate Rx and Tx UARs in robust fashion.
- * This routine handles the following UAR allocation issues:
- *
- *  - tries to allocate the UAR with the most appropriate memory
- *    mapping type from the ones supported by the host
- *
- *  - tries to allocate the UAR with non-NULL base address
- *    OFED 5.0.x and Upstream rdma_core before v29 returned the NULL as
- *    UAR base address if UAR was not the first object in the UAR page.
- *    It caused the PMD failure and we should try to get another UAR
- *    till we get the first one with non-NULL base address returned.
- */
+/* Fill all fields of the Tx and Rx UAR structures. */
 static int
-mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
-                    const struct mlx5_common_dev_config *config)
+mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
 {
-       uint32_t uar_mapping, retry;
-       int err = 0;
-       void *base_addr;
-
-       for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               /* Control the mapping type according to the settings. */
-               uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ?
-                             MLX5DV_UAR_ALLOC_TYPE_NC :
-                             MLX5DV_UAR_ALLOC_TYPE_BF;
-#else
-               RTE_SET_USED(config);
-               /*
-                * It seems we have no way to control the memory mapping type
-                * for the UAR, the default "Write-Combining" type is supposed.
-                * The UAR initialization on queue creation queries the
-                * actual mapping type done by Verbs/kernel and setups the
-                * PMD datapath accordingly.
-                */
-               uar_mapping = 0;
-#endif
-               sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
-                                                      uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               if (!sh->tx_uar &&
-                   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
-                       if (config->dbnc == MLX5_TXDB_CACHED ||
-                           config->dbnc == MLX5_TXDB_HEURISTIC)
-                               DRV_LOG(WARNING, "Devarg tx_db_nc setting "
-                                                "is not supported by DevX");
-                       /*
-                        * In some environments like virtual machine
-                        * the Write Combining mapped might be not supported
-                        * and UAR allocation fails. We try "Non-Cached"
-                        * mapping for the case. The tx_burst routines take
-                        * the UAR mapping type into account on UAR setup
-                        * on queue creation.
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
-                       sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
-                                                              uar_mapping);
-               } else if (!sh->tx_uar &&
-                          uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
-                       if (config->dbnc == MLX5_TXDB_NCACHED)
-                               DRV_LOG(WARNING, "Devarg tx_db_nc settings "
-                                                "is not supported by DevX");
-                       /*
-                        * If Verbs/kernel does not support "Non-Cached"
-                        * try the "Write-Combining".
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
-                       sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
-                                                              uar_mapping);
-               }
-#endif
-               if (!sh->tx_uar) {
-                       DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)");
-                       err = ENOMEM;
-                       goto exit;
-               }
-               base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
-               if (base_addr)
-                       break;
-               /*
-                * The UARs are allocated by rdma_core within the
-                * IB device context, on context closure all UARs
-                * will be freed, should be no memory/object leakage.
-                */
-               DRV_LOG(DEBUG, "Retrying to allocate Tx DevX UAR");
-               sh->tx_uar = NULL;
-       }
-       /* Check whether we finally succeeded with valid UAR allocation. */
-       if (!sh->tx_uar) {
-               DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)");
-               err = ENOMEM;
-               goto exit;
-       }
-       for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
-               uar_mapping = 0;
-               sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
-                                                           uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
-               if (!sh->devx_rx_uar &&
-                   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
-                       /*
-                        * Rx UAR is used to control interrupts only,
-                        * should be no datapath noticeable impact,
-                        * can try "Non-Cached" mapping safely.
-                        */
-                       DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
-                       uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
-                       sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
-                                                  (sh->cdev->ctx, uar_mapping);
-               }
-#endif
-               if (!sh->devx_rx_uar) {
-                       DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)");
-                       err = ENOMEM;
-                       goto exit;
-               }
-               base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
-               if (base_addr)
-                       break;
-               /*
-                * The UARs are allocated by rdma_core within the
-                * IB device context, on context closure all UARs
-                * will be freed, should be no memory/object leakage.
-                */
-               DRV_LOG(DEBUG, "Retrying to allocate Rx DevX UAR");
-               sh->devx_rx_uar = NULL;
+       int ret;
+
+       ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
+               return -rte_errno;
        }
-       /* Check whether we finally succeeded with valid UAR allocation. */
-       if (!sh->devx_rx_uar) {
-               DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)");
-               err = ENOMEM;
+       MLX5_ASSERT(sh->tx_uar.obj);
+       MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
+       ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
+       if (ret) {
+               DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
+               mlx5_devx_uar_release(&sh->tx_uar);
+               return -rte_errno;
        }
-exit:
-       return err;
+       MLX5_ASSERT(sh->rx_uar.obj);
+       MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
+       return 0;
+}
+
+static void
+mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
+{
+       mlx5_devx_uar_release(&sh->rx_uar);
+       mlx5_devx_uar_release(&sh->tx_uar);
 }
 
 /**
@@ -1242,6 +1169,43 @@ mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
        return 0;
 }
 
+/**
+ * Configure realtime timestamp format.
+ *
+ * @param sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ * @param config
+ *   Device configuration parameters.
+ * @param hca_attr
+ *   Pointer to DevX HCA capabilities structure.
+ */
+void
+mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
+                        struct mlx5_dev_config *config,
+                        struct mlx5_hca_attr *hca_attr)
+{
+       uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
+       uint32_t reg[dw_cnt];
+       int ret = ENOTSUP;
+
+       if (hca_attr->access_register_user)
+               ret = mlx5_devx_cmd_register_read(sh->cdev->ctx,
+                                                 MLX5_REGISTER_ID_MTUTC, 0,
+                                                 reg, dw_cnt);
+       if (!ret) {
+               uint32_t ts_mode;
+
+               /* MTUTC register is read successfully. */
+               ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
+               if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
+                       config->rt_timestamp = 1;
+       } else {
+               /*
+                * Kernel does not support register reading. A device clock of
+                * exactly NS_PER_S / MS_PER_S kHz (1000000 kHz, i.e. 1 GHz)
+                * is taken as an indication of real-time timestamp mode.
+                */
+               if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
+                       config->rt_timestamp = 1;
+       }
+}
+
 /**
  * Allocate shared device context. If there is multiport device the
  * master and representors will share this context, if there is single
@@ -1285,23 +1249,22 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        MLX5_ASSERT(spawn->max_port);
        sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
                         sizeof(struct mlx5_dev_ctx_shared) +
-                        spawn->max_port *
-                        sizeof(struct mlx5_dev_shared_port),
+                        spawn->max_port * sizeof(struct mlx5_dev_shared_port),
                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
        if (!sh) {
-               DRV_LOG(ERR, "shared context allocation failure");
-               rte_errno  = ENOMEM;
+               DRV_LOG(ERR, "Shared context allocation failure.");
+               rte_errno = ENOMEM;
                goto exit;
        }
        pthread_mutex_init(&sh->txpp.mutex, NULL);
        sh->numa_node = spawn->cdev->dev->numa_node;
        sh->cdev = spawn->cdev;
-       sh->devx = sh->cdev->config.devx;
+       sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
        if (spawn->bond_info)
                sh->bond = *spawn->bond_info;
-       err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
+       err = mlx5_os_capabilities_prepare(sh);
        if (err) {
-               DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
+               DRV_LOG(ERR, "Fail to configure device capabilities.");
                goto error;
        }
        sh->refcnt = 1;
@@ -1312,41 +1275,36 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
                sizeof(sh->ibdev_path) - 1);
        /*
-        * Setting port_id to max unallowed value means
-        * there is no interrupt subhandler installed for
-        * the given port index i.
+        * Setting port_id to the maximum unallowed value means there is no
+        * interrupt subhandler installed for the given port index i.
         */
        for (i = 0; i < sh->max_port; i++) {
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
        }
-       if (sh->devx) {
+       if (sh->cdev->config.devx) {
                sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
                if (!sh->td) {
                        DRV_LOG(ERR, "TD allocation failure");
-                       err = ENOMEM;
+                       rte_errno = ENOMEM;
                        goto error;
                }
                if (mlx5_setup_tis(sh)) {
                        DRV_LOG(ERR, "TIS allocation failure");
-                       err = ENOMEM;
+                       rte_errno = ENOMEM;
                        goto error;
                }
-               err = mlx5_alloc_rxtx_uars(sh, &sh->cdev->config);
+               err = mlx5_rxtx_uars_prepare(sh);
                if (err)
                        goto error;
-               MLX5_ASSERT(sh->tx_uar);
-               MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));
-
-               MLX5_ASSERT(sh->devx_rx_uar);
-               MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
-       }
 #ifndef RTE_ARCH_64
-       /* Initialize UAR access locks for 32bit implementations. */
-       rte_spinlock_init(&sh->uar_lock_cq);
-       for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
-               rte_spinlock_init(&sh->uar_lock[i]);
+       } else {
+               /* Initialize UAR access locks for 32bit implementations. */
+               rte_spinlock_init(&sh->uar_lock_cq);
+               for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+                       rte_spinlock_init(&sh->uar_lock[i]);
 #endif
+       }
        mlx5_os_dev_shared_handler_install(sh);
        if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
                err = mlx5_flow_os_init_workspace_once();
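
The relocated 32-bit branch above now initializes the UAR locks only on the non-DevX path. The locks exist because a 64-bit doorbell write is not a single atomic store on 32-bit targets. A hedged sketch of the underlying idea follows; the names are illustrative, and the PMD's real doorbell helpers additionally handle byte order and write barriers.

```c
#include <stdint.h>
#include <rte_spinlock.h>

/*
 * On 32-bit builds the 64-bit doorbell value must be written as two
 * 32-bit stores, and the pair has to appear atomic to the device,
 * hence the spinlock. Illustrative only.
 */
static inline void
uar_write64_locked(uint64_t val, volatile uint32_t *db, rte_spinlock_t *lock)
{
	rte_spinlock_lock(lock);
	db[0] = (uint32_t)val;
	db[1] = (uint32_t)(val >> 32);
	rte_spinlock_unlock(lock);
}
```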
@@ -1363,22 +1321,19 @@ exit:
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        return sh;
 error:
+       err = rte_errno;
        pthread_mutex_destroy(&sh->txpp.mutex);
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        MLX5_ASSERT(sh);
-       if (sh->td)
-               claim_zero(mlx5_devx_cmd_destroy(sh->td));
+       mlx5_rxtx_uars_release(sh);
        i = 0;
        do {
                if (sh->tis[i])
                        claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
        } while (++i < (uint32_t)sh->bond.n_port);
-       if (sh->devx_rx_uar)
-               mlx5_glue->devx_free_uar(sh->devx_rx_uar);
-       if (sh->tx_uar)
-               mlx5_glue->devx_free_uar(sh->tx_uar);
+       if (sh->td)
+               claim_zero(mlx5_devx_cmd_destroy(sh->td));
        mlx5_free(sh);
-       MLX5_ASSERT(err > 0);
        rte_errno = err;
        return NULL;
 }
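
The reworked error path above saves rte_errno before running the cleanup calls, because those calls may overwrite it, and restores it just before returning. A self-contained sketch of the idiom, with hypothetical helpers standing in for the shared-context teardown:

```c
#include <errno.h>
#include <stddef.h>
#include <rte_errno.h>

/* Hypothetical stand-ins for the setup/teardown calls. */
static int  alloc_resources(void) { rte_errno = ENOMEM; return -1; }
static void free_resources(void) { rte_errno = 0; /* clobbers rte_errno */ }

static void *
create_ctx(void)
{
	int err;

	if (alloc_resources() == 0)
		return (void *)1; /* some valid object */
	err = rte_errno;   /* Save the original failure reason first. */
	free_resources();  /* Cleanup may overwrite rte_errno... */
	rte_errno = err;   /* ...so restore it for the caller. */
	return NULL;
}
```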
@@ -1432,11 +1387,17 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_flow_os_release_workspace();
        }
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+       if (sh->flex_parsers_dv) {
+               mlx5_list_destroy(sh->flex_parsers_dv);
+               sh->flex_parsers_dv = NULL;
+       }
        /*
         *  Ensure there is no async event handler installed.
         *  Only primary process handles async device events.
         **/
        mlx5_flow_counters_mng_close(sh);
+       if (sh->ct_mng)
+               mlx5_flow_aso_ct_mng_close(sh);
        if (sh->aso_age_mng) {
                mlx5_flow_aso_age_mng_close(sh);
                sh->aso_age_mng = NULL;
@@ -1445,18 +1406,13 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_aso_flow_mtrs_mng_close(sh);
        mlx5_flow_ipool_destroy(sh);
        mlx5_os_dev_shared_handler_uninstall(sh);
-       if (sh->tx_uar) {
-               mlx5_glue->devx_free_uar(sh->tx_uar);
-               sh->tx_uar = NULL;
-       }
+       mlx5_rxtx_uars_release(sh);
        do {
                if (sh->tis[i])
                        claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
        } while (++i < sh->bond.n_port);
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
-       if (sh->devx_rx_uar)
-               mlx5_glue->devx_free_uar(sh->devx_rx_uar);
        MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
        pthread_mutex_destroy(&sh->txpp.mutex);
        mlx5_free(sh);
@@ -1576,10 +1532,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
                         struct rte_eth_udp_tunnel *udp_tunnel)
 {
        MLX5_ASSERT(udp_tunnel != NULL);
-       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+       if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
            udp_tunnel->udp_port == 4789)
                return 0;
-       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+       if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
            udp_tunnel->udp_port == 4790)
                return 0;
        return -ENOTSUP;
@@ -1606,8 +1562,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
         * UAR register table follows the process private structure. BlueFlame
         * registers for Tx queues are stored in the table.
         */
-       ppriv_size =
-               sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
+       ppriv_size = sizeof(struct mlx5_proc_priv) +
+                    priv->txqs_n * sizeof(struct mlx5_uar_data);
        ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
                            RTE_CACHE_LINE_SIZE, dev->device->numa_node);
        if (!ppriv) {
@@ -1616,6 +1572,8 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
        }
        ppriv->uar_table_sz = priv->txqs_n;
        dev->process_private = ppriv;
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               priv->sh->pppriv = ppriv;
        return 0;
 }
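
The resized allocation above follows the header-plus-flexible-array layout: the per-queue UAR table lives directly behind struct mlx5_proc_priv in a single allocation, now with mlx5_uar_data entries instead of bare pointers. A generic sketch of that sizing idiom, with plain calloc standing in for mlx5_malloc and an illustrative entry type:

```c
#include <stdint.h>
#include <stdlib.h>

/* Generic header + flexible array, mirroring the ppriv_size computation:
 * sizeof(header) + n * sizeof(entry). Types are illustrative, not the
 * PMD's mlx5_proc_priv/mlx5_uar_data. */
struct uar_table {
	size_t n;         /* plays the role of uar_table_sz */
	uint64_t entry[]; /* stands in for struct mlx5_uar_data */
};

static struct uar_table *
uar_table_alloc(size_t n)
{
	struct uar_table *t = calloc(1, sizeof(*t) + n * sizeof(t->entry[0]));

	if (t != NULL)
		t->n = n;
	return t;
}
```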
 
@@ -1678,25 +1636,22 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        mlx5_action_handle_flush(dev);
        mlx5_flow_meter_flush(dev, NULL);
        /* Prevent crashes when queues are still in use. */
-       dev->rx_pkt_burst = removed_rx_burst;
-       dev->tx_pkt_burst = removed_tx_burst;
+       dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+       dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_os_req_stop_rxtx(dev);
        /* Free the eCPRI flex parser resource. */
        mlx5_flex_parser_ecpri_release(dev);
-       if (priv->rxqs != NULL) {
+       mlx5_flex_item_port_cleanup(dev);
+       if (priv->rxq_privs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                rte_delay_us_sleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i)
                        mlx5_rxq_release(dev, i);
                priv->rxqs_n = 0;
-               priv->rxqs = NULL;
-       }
-       if (priv->representor) {
-               /* Each representor has a dedicated interrupts handler */
-               mlx5_free(dev->intr_handle);
-               dev->intr_handle = NULL;
+               mlx5_free(priv->rxq_privs);
+               priv->rxq_privs = NULL;
        }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
@@ -1716,8 +1671,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        if (priv->mreg_cp_tbl)
                mlx5_hlist_destroy(priv->mreg_cp_tbl);
        mlx5_mprq_free_mp(dev);
-       if (priv->sh->ct_mng)
-               mlx5_flow_aso_ct_mng_close(priv->sh);
        mlx5_os_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                mlx5_free(priv->rss_conf.rss_key);
@@ -1764,7 +1717,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        /*
         * Free the shared context in last turn, because the cleanup
         * routines above may use some shared fields, like
-        * mlx5_os_mac_addr_flush() uses ibdev_path for retrieveing
+        * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
         * ifindex if Netlink fails.
         */
        mlx5_free_shared_dev_ctx(priv->sh);
@@ -2006,9 +1959,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
        } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
                config->mprq.enabled = !!tmp;
        } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
-               config->mprq.stride_num_n = tmp;
+               config->mprq.log_stride_num = tmp;
        } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
-               config->mprq.stride_size_n = tmp;
+               config->mprq.log_stride_size = tmp;
        } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
                config->mprq.max_memcpy_len = tmp;
        } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
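
The log_ prefix in the renamed mprq fields makes the devarg semantics explicit: the value is a base-2 exponent, so mprq_log_stride_num=6 requests 64 strides. A one-line sketch of the conversion (the helper name is illustrative):

```c
#include <stdint.h>

/* Number of MPRQ strides from the log2 devarg value, e.g. 6 -> 64. */
static inline uint32_t
mprq_stride_num(uint32_t log_stride_num)
{
	return UINT32_C(1) << log_stride_num;
}
```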
@@ -2084,7 +2037,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                if (tmp != MLX5_RCM_NONE &&
                    tmp != MLX5_RCM_LIGHT &&
                    tmp != MLX5_RCM_AGGR) {
-                       DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
+                       DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
                        rte_errno = EINVAL;
                        return -rte_errno;
                }
@@ -2093,10 +2046,13 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                config->decap_en = !!tmp;
        } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
                config->allow_duplicate_pattern = !!tmp;
+       } else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
+               config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
+               config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
        } else {
-               DRV_LOG(WARNING, "%s: unknown parameter", key);
-               rte_errno = EINVAL;
-               return -rte_errno;
+               DRV_LOG(WARNING,
+                       "%s: unknown parameter, maybe it's for another class.",
+                       key);
        }
        return 0;
 }
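
The new delay_drop devarg is a bitmask: judging from the two tests above, bit 0 enables delay drop for standard Rx queues and bit 1 for hairpin queues, so delay_drop=3 enables both. A tiny decoding sketch with the bit values assumed from those tests (the MLX5_DELAY_DROP_* constants themselves are defined elsewhere in the PMD):

```c
#include <stdbool.h>
#include <stdint.h>

/* Assumed bit values, inferred from the !!(tmp & ...) tests above. */
#define DELAY_DROP_STANDARD UINT32_C(0x1) /* standard Rx queues */
#define DELAY_DROP_HAIRPIN  UINT32_C(0x2) /* hairpin Rx queues */

struct delay_drop_conf {
	bool std_delay_drop;
	bool hp_delay_drop;
};

/* delay_drop=0 disables both, =1 standard only, =2 hairpin only, =3 both. */
static void
delay_drop_decode(uint32_t tmp, struct delay_drop_conf *conf)
{
	conf->std_delay_drop = !!(tmp & DELAY_DROP_STANDARD);
	conf->hp_delay_drop = !!(tmp & DELAY_DROP_HAIRPIN);
}
```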
@@ -2115,74 +2071,25 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 int
 mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 {
-       const char **params = (const char *[]){
-               MLX5_DRIVER_KEY,
-               MLX5_RXQ_CQE_COMP_EN,
-               MLX5_RXQ_PKT_PAD_EN,
-               MLX5_RX_MPRQ_EN,
-               MLX5_RX_MPRQ_LOG_STRIDE_NUM,
-               MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
-               MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
-               MLX5_RXQS_MIN_MPRQ,
-               MLX5_TXQ_INLINE,
-               MLX5_TXQ_INLINE_MIN,
-               MLX5_TXQ_INLINE_MAX,
-               MLX5_TXQ_INLINE_MPW,
-               MLX5_TXQS_MIN_INLINE,
-               MLX5_TXQS_MAX_VEC,
-               MLX5_TXQ_MPW_EN,
-               MLX5_TXQ_MPW_HDR_DSEG_EN,
-               MLX5_TXQ_MAX_INLINE_LEN,
-               MLX5_TX_DB_NC,
-               MLX5_TX_PP,
-               MLX5_TX_SKEW,
-               MLX5_TX_VEC_EN,
-               MLX5_RX_VEC_EN,
-               MLX5_L3_VXLAN_EN,
-               MLX5_VF_NL_EN,
-               MLX5_DV_ESW_EN,
-               MLX5_DV_FLOW_EN,
-               MLX5_DV_XMETA_EN,
-               MLX5_LACP_BY_USER,
-               MLX5_MR_EXT_MEMSEG_EN,
-               MLX5_REPRESENTOR,
-               MLX5_MAX_DUMP_FILES_NUM,
-               MLX5_LRO_TIMEOUT_USEC,
-               RTE_DEVARGS_KEY_CLASS,
-               MLX5_HP_BUF_SIZE,
-               MLX5_RECLAIM_MEM,
-               MLX5_SYS_MEM_EN,
-               MLX5_DECAP_EN,
-               MLX5_ALLOW_DUPLICATE_PATTERN,
-               MLX5_MR_MEMPOOL_REG_EN,
-               NULL,
-       };
        struct rte_kvargs *kvlist;
        int ret = 0;
-       int i;
 
        if (devargs == NULL)
                return 0;
-       /* Following UGLY cast is done to pass checkpatch. */
-       kvlist = rte_kvargs_parse(devargs->args, params);
+       kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        /* Process parameters. */
-       for (i = 0; (params[i] != NULL); ++i) {
-               if (rte_kvargs_count(kvlist, params[i])) {
-                       ret = rte_kvargs_process(kvlist, params[i],
-                                                mlx5_args_check, config);
-                       if (ret) {
-                               rte_errno = EINVAL;
-                               rte_kvargs_free(kvlist);
-                               return -rte_errno;
-                       }
-               }
+       ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
+       if (ret) {
+               rte_errno = EINVAL;
+               ret = -rte_errno;
        }
        rte_kvargs_free(kvlist);
-       return 0;
+       return ret;
 }
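
With the fixed key list gone, rte_kvargs_parse() is called with a NULL valid-keys array (accept any key) and rte_kvargs_process() with a NULL key (invoke the handler for every pair); that is what lets unknown keys fall through to the "maybe it's for another class" warning instead of failing. A minimal standalone sketch of the same pattern; the handler and its output are hypothetical:

```c
#include <stdio.h>
#include <rte_kvargs.h>

/* Invoked once per key=value pair when the key filter is NULL. */
static int
show_kvarg(const char *key, const char *value, void *opaque)
{
	(void)opaque;
	printf("devarg %s=%s\n", key, value);
	return 0; /* A negative return aborts the walk with an error. */
}

static int
parse_all(const char *args) /* e.g. "class=eth,delay_drop=3" */
{
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, NULL);
	int ret;

	if (kvlist == NULL)
		return -1;
	ret = rte_kvargs_process(kvlist, NULL, show_kvarg, NULL);
	rte_kvargs_free(kvlist);
	return ret;
}
```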
 
 /**
@@ -2205,6 +2112,8 @@ void
 mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                    struct mlx5_dev_config *config)
 {
+       struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+
        if (config->txq_inline_min != MLX5_ARG_UNSET) {
                /* Application defines size of inlined data explicitly. */
                if (spawn->pci_dev != NULL) {
@@ -2224,9 +2133,9 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                }
                goto exit;
        }
-       if (config->hca_attr.eth_net_offloads) {
+       if (hca_attr->eth_net_offloads) {
                /* We have DevX enabled, inline mode queried successfully. */
-               switch (config->hca_attr.wqe_inline_mode) {
+               switch (hca_attr->wqe_inline_mode) {
                case MLX5_CAP_INLINE_MODE_L2:
                        /* outer L2 header must be inlined. */
                        config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
@@ -2235,14 +2144,14 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                        /* No inline data are required by NIC. */
                        config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
                        config->hw_vlan_insert =
-                               config->hca_attr.wqe_vlan_insert;
+                               hca_attr->wqe_vlan_insert;
                        DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
                        goto exit;
                case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                        /* inline mode is defined by NIC vport context. */
-                       if (!config->hca_attr.eth_virt)
+                       if (!hca_attr->eth_virt)
                                break;
-                       switch (config->hca_attr.vport_inline_mode) {
+                       switch (hca_attr->vport_inline_mode) {
                        case MLX5_INLINE_MODE_NONE:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_NONE;
@@ -2345,17 +2254,17 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev)
                break;
        }
        if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
-               DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X",
+               DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
                                 sh->dv_mark_mask, mark);
        else
                sh->dv_mark_mask = mark;
        if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
-               DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X",
+               DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
                                 sh->dv_meta_mask, meta);
        else
                sh->dv_meta_mask = meta;
        if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
-               DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X",
+               DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
                                 sh->dv_regc0_mask, reg_c0);
        else
                sh->dv_regc0_mask = reg_c0;
@@ -2386,25 +2295,26 @@ rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
 }
 
 /**
- * Comparison callback to sort device data.
+ * Check sibling device configurations.
  *
- * This is meant to be used with qsort().
+ * Sibling devices sharing the Infiniband device context should have compatible
+ * configurations. This applies to representors and bonding devices.
  *
- * @param a[in]
- *   Pointer to pointer to first data object.
- * @param b[in]
- *   Pointer to pointer to second data object.
+ * @param sh
+ *   Shared device context.
+ * @param config
+ *   Configuration of the device that is going to be created.
+ * @param dpdk_dev
+ *   Backing DPDK device.
  *
  * @return
- *   0 if both objects are equal, less than 0 if the first argument is less
- *   than the second, greater than 0 otherwise.
+ *   0 on success, EINVAL otherwise.
  */
 int
-mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
+mlx5_dev_check_sibling_config(struct mlx5_dev_ctx_shared *sh,
                              struct mlx5_dev_config *config,
                              struct rte_device *dpdk_dev)
 {
-       struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_dev_config *sh_conf = NULL;
        uint16_t port_id;
 
@@ -2417,7 +2327,7 @@ mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
                struct mlx5_priv *opriv =
                        rte_eth_devices[port_id].data->dev_private;
 
-               if (opriv && opriv != priv && opriv->sh == sh) {
+               if (opriv && opriv->sh == sh) {
                        sh_conf = &opriv->config;
                        break;
                }