net/mlx5: share Direct Rules/Verbs flow related structures
[dpdk.git] / drivers / net / mlx5 / mlx5.c
index 9dd74ec..f571ba2 100644 (file)
 /* Activate Netlink support in VF mode. */
 #define MLX5_VF_NL_EN "vf_nl_en"
 
+/* Enable extending memsegs when creating a MR. */
+#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"
+
 /* Select port representors to instantiate. */
 #define MLX5_REPRESENTOR "representor"
 
@@ -127,6 +130,9 @@ struct mlx5_shared_data *mlx5_shared_data;
 /* Spinlock for mlx5_shared_data allocation. */
 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 
+/* Process local data for secondary processes. */
+static struct mlx5_local_data mlx5_local_data;
+
 /** Driver-specific log messages type. */
 int mlx5_logtype;
 
@@ -140,13 +146,273 @@ struct mlx5_dev_spawn_data {
        struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
 };
 
+static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
+static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/**
+ * Allocate shared IB device context. If there is multiport device the
+ * master and representors will share this context, if there is single
+ * port dedicated IB device, the context will be used by only given
+ * port due to unification.
+ *
+ * Routine first searches the context for the specified IB device name,
+ * if found the shared context assumed and reference counter is incremented.
+ * If no context found the new one is created and initialized with specified
+ * IB device context and parameters.
+ *
+ * @param[in] spawn
+ *   Pointer to the IB device attributes (name, port, etc).
+ *
+ * @return
+ *   Pointer to mlx5_ibv_shared object on success,
+ *   otherwise NULL and rte_errno is set.
+ */
+static struct mlx5_ibv_shared *
+mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
+{
+       struct mlx5_ibv_shared *sh;
+       int err = 0;
+       uint32_t i;
+
+       assert(spawn);
+       /* Secondary process should not create the shared context. */
+       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       pthread_mutex_lock(&mlx5_ibv_list_mutex);
+       /* Search for IB context by device name. */
+       LIST_FOREACH(sh, &mlx5_ibv_list, next) {
+               if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
+                       sh->refcnt++;
+                       goto exit;
+               }
+       }
+       /* No device found, we have to create new shared context. */
+       assert(spawn->max_port);
+       sh = rte_zmalloc("ethdev shared ib context",
+                        sizeof(struct mlx5_ibv_shared) +
+                        spawn->max_port *
+                        sizeof(struct mlx5_ibv_shared_port),
+                        RTE_CACHE_LINE_SIZE);
+       if (!sh) {
+               DRV_LOG(ERR, "shared context allocation failure");
+               rte_errno = ENOMEM;
+               goto exit;
+       }
+       /* Try to open IB device with DV first, then usual Verbs. */
+       errno = 0;
+       sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
+       if (sh->ctx) {
+               sh->devx = 1;
+               DRV_LOG(DEBUG, "DevX is supported");
+       } else {
+               sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
+               if (!sh->ctx) {
+                       err = errno ? errno : ENODEV;
+                       goto error;
+               }
+               DRV_LOG(DEBUG, "DevX is NOT supported");
+       }
+       err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
+       if (err) {
+               DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
+               goto error;
+       }
+       sh->refcnt = 1;
+       sh->max_port = spawn->max_port;
+       strncpy(sh->ibdev_name, sh->ctx->device->name,
+               sizeof(sh->ibdev_name));
+       strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
+               sizeof(sh->ibdev_path));
+       pthread_mutex_init(&sh->intr_mutex, NULL);
+       /*
+        * Setting port_id to max unallowed value means
+        * there is no interrupt subhandler installed for
+        * the given port index i.
+        */
+       for (i = 0; i < sh->max_port; i++)
+               sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
+       sh->pd = mlx5_glue->alloc_pd(sh->ctx);
+       if (sh->pd == NULL) {
+               DRV_LOG(ERR, "PD allocation failure");
+               err = ENOMEM;
+               goto error;
+       }
+       LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
+exit:
+       pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+       return sh;
+error:
+       pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+       assert(sh);
+       if (sh->pd)
+               claim_zero(mlx5_glue->dealloc_pd(sh->pd));
+       if (sh->ctx)
+               claim_zero(mlx5_glue->close_device(sh->ctx));
+       rte_free(sh);
+       assert(err > 0);
+       rte_errno = err;
+       return NULL;
+}
+
+/**
+ * Free shared IB device context. Decrement counter and if zero free
+ * all allocated resources and close handles.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_ibv_shared object to free
+ */
+static void
+mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
+{
+       pthread_mutex_lock(&mlx5_ibv_list_mutex);
+#ifndef NDEBUG
+       /* Check the object presence in the list. */
+       struct mlx5_ibv_shared *lctx;
+
+       LIST_FOREACH(lctx, &mlx5_ibv_list, next)
+               if (lctx == sh)
+                       break;
+       assert(lctx);
+       if (lctx != sh) {
+               DRV_LOG(ERR, "Freeing non-existing shared IB context");
+               goto exit;
+       }
+#endif
+       assert(sh);
+       assert(sh->refcnt);
+       /* Secondary process should not free the shared context. */
+       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       if (--sh->refcnt)
+               goto exit;
+       LIST_REMOVE(sh, next);
+       /*
+        * Ensure there is no async event handler installed.
+        * Only primary process handles async device events.
+        */
+       assert(!sh->intr_cnt);
+       if (sh->intr_cnt)
+               rte_intr_callback_unregister
+                       (&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
+       pthread_mutex_destroy(&sh->intr_mutex);
+       if (sh->pd)
+               claim_zero(mlx5_glue->dealloc_pd(sh->pd));
+       if (sh->ctx)
+               claim_zero(mlx5_glue->close_device(sh->ctx));
+       rte_free(sh);
+exit:
+       pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+}
+
+/**
+ * Initialize DR related data within private structure.
+ * Routine checks the reference counter and does actual
+ * resources creation/initialization only if counter is zero.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ *
+ * @return
+ *   Zero on success, positive error code otherwise.
+ */
+static int
+mlx5_alloc_shared_dr(struct mlx5_priv *priv)
+{
+#ifdef HAVE_MLX5DV_DR
+       struct mlx5_ibv_shared *sh = priv->sh;
+       int err = 0;
+       void *ns;
+
+       assert(sh);
+       if (sh->dv_refcnt) {
+               /* Shared DV/DR structures is already initialized. */
+               sh->dv_refcnt++;
+               priv->dr_shared = 1;
+               return 0;
+       }
+       /* Reference counter is zero, we should initialize structures. */
+       ns = mlx5dv_dr_create_ns(sh->ctx, MLX5DV_DR_NS_DOMAIN_INGRESS_BYPASS);
+       if (!ns) {
+               DRV_LOG(ERR, "ingress mlx5dv_dr_create_ns failed");
+               err = errno;
+               goto error;
+       }
+       sh->rx_ns = ns;
+       ns = mlx5dv_dr_create_ns(sh->ctx, MLX5DV_DR_NS_DOMAIN_EGRESS_BYPASS);
+       if (!ns) {
+               DRV_LOG(ERR, "egress mlx5dv_dr_create_ns failed");
+               err = errno;
+               goto error;
+       }
+       pthread_mutex_init(&sh->dv_mutex, NULL);
+       sh->tx_ns = ns;
+       sh->dv_refcnt++;
+       priv->dr_shared = 1;
+       return 0;
+
+error:
+       /* Rollback the created objects. */
+       if (sh->rx_ns) {
+               mlx5dv_dr_destroy_ns(sh->rx_ns);
+               sh->rx_ns = NULL;
+       }
+       if (sh->tx_ns) {
+               mlx5dv_dr_destroy_ns(sh->tx_ns);
+               sh->tx_ns = NULL;
+       }
+       return err;
+#else
+       (void)priv;
+       return 0;
+#endif
+}
+
 /**
- * Prepare shared data between primary and secondary process.
+ * Destroy DR related data within private structure.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
  */
 static void
-mlx5_prepare_shared_data(void)
+mlx5_free_shared_dr(struct mlx5_priv *priv)
+{
+#ifdef HAVE_MLX5DV_DR
+       struct mlx5_ibv_shared *sh;
+
+       if (!priv->dr_shared)
+               return;
+       priv->dr_shared = 0;
+       sh = priv->sh;
+       assert(sh);
+       assert(sh->dv_refcnt);
+       if (sh->dv_refcnt && --sh->dv_refcnt)
+               return;
+       if (sh->rx_ns) {
+               mlx5dv_dr_destroy_ns(sh->rx_ns);
+               sh->rx_ns = NULL;
+       }
+       if (sh->tx_ns) {
+               mlx5dv_dr_destroy_ns(sh->tx_ns);
+               sh->tx_ns = NULL;
+       }
+       pthread_mutex_destroy(&sh->dv_mutex);
+#else
+       (void)priv;
+#endif
+}
+
+/**
+ * Initialize shared data between primary and secondary process.
+ *
+ * A memzone is reserved by primary process and secondary processes attach to
+ * the memzone.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_init_shared_data(void)
 {
        const struct rte_memzone *mz;
+       int ret = 0;
 
        rte_spinlock_lock(&mlx5_shared_data_lock);
        if (mlx5_shared_data == NULL) {
@@ -155,20 +421,53 @@ mlx5_prepare_shared_data(void)
                        mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
                                                 sizeof(*mlx5_shared_data),
                                                 SOCKET_ID_ANY, 0);
+                       if (mz == NULL) {
+                               DRV_LOG(ERR,
+                                       "Cannot allocate mlx5 shared data\n");
+                               ret = -rte_errno;
+                               goto error;
+                       }
+                       mlx5_shared_data = mz->addr;
+                       memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
+                       rte_spinlock_init(&mlx5_shared_data->lock);
                } else {
                        /* Lookup allocated shared memory. */
                        mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+                       if (mz == NULL) {
+                               DRV_LOG(ERR,
+                                       "Cannot attach mlx5 shared data\n");
+                               ret = -rte_errno;
+                               goto error;
+                       }
+                       mlx5_shared_data = mz->addr;
+                       memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
                }
-               if (mz == NULL)
-                       rte_panic("Cannot allocate mlx5 shared data\n");
-               mlx5_shared_data = mz->addr;
-               /* Initialize shared data. */
+       }
+error:
+       rte_spinlock_unlock(&mlx5_shared_data_lock);
+       return ret;
+}
+
+/**
+ * Uninitialize shared data between primary and secondary process.
+ *
+ * The pointer of secondary process is dereferenced and primary process frees
+ * the memzone.
+ */
+static void
+mlx5_uninit_shared_data(void)
+{
+       const struct rte_memzone *mz;
+
+       rte_spinlock_lock(&mlx5_shared_data_lock);
+       if (mlx5_shared_data) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-                       LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
-                       rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
+                       mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+                       rte_memzone_free(mz);
+               } else {
+                       memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
                }
-               rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
-                                               mlx5_mr_mem_event_cb, NULL);
+               mlx5_shared_data = NULL;
        }
        rte_spinlock_unlock(&mlx5_shared_data_lock);
 }
@@ -263,7 +562,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 
        DRV_LOG(DEBUG, "port %u closing device \"%s\"",
                dev->data->port_id,
-               ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+               ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
        /* In case mlx5_dev_stop() has not been called. */
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_traffic_disable(dev);
@@ -271,6 +570,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
+       rte_wmb();
+       /* Disable datapath on secondary process. */
+       mlx5_mp_req_stop_rxtx(dev);
        if (priv->rxqs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                usleep(1000);
@@ -289,18 +591,15 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        }
        mlx5_mprq_free_mp(dev);
        mlx5_mr_release(dev);
-       if (priv->pd != NULL) {
-               assert(priv->ctx != NULL);
-               claim_zero(mlx5_glue->dealloc_pd(priv->pd));
-               claim_zero(mlx5_glue->close_device(priv->ctx));
-       } else
-               assert(priv->ctx == NULL);
+       assert(priv->sh);
+       mlx5_free_shared_dr(priv);
+       if (priv->sh)
+               mlx5_free_shared_ibctx(priv->sh);
+       priv->sh = NULL;
        if (priv->rss_conf.rss_key != NULL)
                rte_free(priv->rss_conf.rss_key);
        if (priv->reta_idx != NULL)
                rte_free(priv->reta_idx);
-       if (priv->primary_socket)
-               mlx5_socket_uninit(dev);
        if (priv->config.vf)
                mlx5_nl_mac_addr_flush(dev);
        if (priv->nl_socket_route >= 0)
@@ -339,17 +638,15 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                        dev->data->port_id);
        if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
                unsigned int c = 0;
-               unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
-               uint16_t port_id[i];
+               uint16_t port_id;
 
-               i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
-               while (i--) {
+               RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
                        struct mlx5_priv *opriv =
-                               rte_eth_devices[port_id[i]].data->dev_private;
+                               rte_eth_devices[port_id].data->dev_private;
 
                        if (!opriv ||
                            opriv->domain_id != priv->domain_id ||
-                           &rte_eth_devices[port_id[i]] == dev)
+                           &rte_eth_devices[port_id] == dev)
                                continue;
                        ++c;
                }
@@ -534,6 +831,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                config->vf_nl_en = !!tmp;
        } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
                config->dv_flow_en = !!tmp;
+       } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
+               config->mr_ext_memseg_en = !!tmp;
        } else {
                DRV_LOG(WARNING, "%s: unknown parameter", key);
                rte_errno = EINVAL;
@@ -575,6 +874,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                MLX5_L3_VXLAN_EN,
                MLX5_VF_NL_EN,
                MLX5_DV_FLOW_EN,
+               MLX5_MR_EXT_MEMSEG_EN,
                MLX5_REPRESENTOR,
                NULL,
        };
@@ -606,15 +906,6 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 
 static struct rte_pci_driver mlx5_driver;
 
-/*
- * Reserved UAR address space for TXQ UAR(hw doorbell) mapping, process
- * local resource used by both primary and secondary to avoid duplicate
- * reservation.
- * The space has to be available on both primary and secondary process,
- * TXQ UAR maps to this area using fixed mmap w/o double check.
- */
-static void *uar_base;
-
 static int
 find_lower_va_bound(const struct rte_memseg_list *msl,
                const struct rte_memseg *ms, void *arg)
@@ -634,25 +925,24 @@ find_lower_va_bound(const struct rte_memseg_list *msl,
 /**
  * Reserve UAR address space for primary process.
  *
- * @param[in] dev
- *   Pointer to Ethernet device.
+ * Process local resource is used by both primary and secondary to avoid
+ * duplicate reservation. The space has to be available on both primary and
+ * secondary process, TXQ UAR maps to this area using fixed mmap w/o double
+ * check.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_uar_init_primary(struct rte_eth_dev *dev)
+mlx5_uar_init_primary(void)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_shared_data *sd = mlx5_shared_data;
        void *addr = (void *)0;
 
-       if (uar_base) { /* UAR address space mapped. */
-               priv->uar_base = uar_base;
+       if (sd->uar_base)
                return 0;
-       }
        /* find out lower bound of hugepage segments */
        rte_memseg_walk(find_lower_va_bound, &addr);
-
        /* keep distance to hugepages to minimize potential conflicts. */
        addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
        /* anonymous mmap, no real memory consumption. */
@@ -660,65 +950,156 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
                    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED) {
                DRV_LOG(ERR,
-                       "port %u failed to reserve UAR address space, please"
-                       " adjust MLX5_UAR_SIZE or try --base-virtaddr",
-                       dev->data->port_id);
+                       "Failed to reserve UAR address space, please"
+                       " adjust MLX5_UAR_SIZE or try --base-virtaddr");
                rte_errno = ENOMEM;
                return -rte_errno;
        }
        /* Accept either same addr or a new addr returned from mmap if target
         * range occupied.
         */
-       DRV_LOG(INFO, "port %u reserved UAR address space: %p",
-               dev->data->port_id, addr);
-       priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
-       uar_base = addr; /* process local, don't reserve again. */
+       DRV_LOG(INFO, "Reserved UAR address space: %p", addr);
+       sd->uar_base = addr; /* for primary and secondary UAR re-mmap. */
        return 0;
 }
 
 /**
- * Reserve UAR address space for secondary process, align with
- * primary process.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
+ * Unmap UAR address space reserved for primary process.
+ */
+static void
+mlx5_uar_uninit_primary(void)
+{
+       struct mlx5_shared_data *sd = mlx5_shared_data;
+
+       if (!sd->uar_base)
+               return;
+       munmap(sd->uar_base, MLX5_UAR_SIZE);
+       sd->uar_base = NULL;
+}
+
+/**
+ * Reserve UAR address space for secondary process, align with primary process.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_uar_init_secondary(struct rte_eth_dev *dev)
+mlx5_uar_init_secondary(void)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_shared_data *sd = mlx5_shared_data;
+       struct mlx5_local_data *ld = &mlx5_local_data;
        void *addr;
 
-       assert(priv->uar_base);
-       if (uar_base) { /* already reserved. */
-               assert(uar_base == priv->uar_base);
+       if (ld->uar_base) { /* Already reserved. */
+               assert(sd->uar_base == ld->uar_base);
                return 0;
        }
+       assert(sd->uar_base);
        /* anonymous mmap, no real memory consumption. */
-       addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
+       addr = mmap(sd->uar_base, MLX5_UAR_SIZE,
                    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED) {
-               DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
-                       dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+               DRV_LOG(ERR, "UAR mmap failed: %p size: %llu",
+                       sd->uar_base, MLX5_UAR_SIZE);
                rte_errno = ENXIO;
                return -rte_errno;
        }
-       if (priv->uar_base != addr) {
+       if (sd->uar_base != addr) {
                DRV_LOG(ERR,
-                       "port %u UAR address %p size %llu occupied, please"
+                       "UAR address %p size %llu occupied, please"
                        " adjust MLX5_UAR_OFFSET or try EAL parameter"
                        " --base-virtaddr",
-                       dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+                       sd->uar_base, MLX5_UAR_SIZE);
                rte_errno = ENXIO;
                return -rte_errno;
        }
-       uar_base = addr; /* process local, don't reserve again */
-       DRV_LOG(INFO, "port %u reserved UAR address space: %p",
-               dev->data->port_id, addr);
+       ld->uar_base = addr;
+       DRV_LOG(INFO, "Reserved UAR address space: %p", addr);
+       return 0;
+}
+
+/**
+ * Unmap UAR address space reserved for secondary process.
+ */
+static void
+mlx5_uar_uninit_secondary(void)
+{
+       struct mlx5_local_data *ld = &mlx5_local_data;
+
+       if (!ld->uar_base)
+               return;
+       munmap(ld->uar_base, MLX5_UAR_SIZE);
+       ld->uar_base = NULL;
+}
+
+/**
+ * PMD global initialization.
+ *
+ * Independent from individual device, this function initializes global
+ * per-PMD data structures distinguishing primary and secondary processes.
+ * Hence, each initialization is called once per a process.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_init_once(void)
+{
+       struct mlx5_shared_data *sd;
+       struct mlx5_local_data *ld = &mlx5_local_data;
+       int ret;
+
+       if (mlx5_init_shared_data())
+               return -rte_errno;
+       sd = mlx5_shared_data;
+       assert(sd);
+       rte_spinlock_lock(&sd->lock);
+       switch (rte_eal_process_type()) {
+       case RTE_PROC_PRIMARY:
+               if (sd->init_done)
+                       break;
+               LIST_INIT(&sd->mem_event_cb_list);
+               rte_rwlock_init(&sd->mem_event_rwlock);
+               rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+                                               mlx5_mr_mem_event_cb, NULL);
+               mlx5_mp_init_primary();
+               ret = mlx5_uar_init_primary();
+               if (ret)
+                       goto error;
+               sd->init_done = true;
+               break;
+       case RTE_PROC_SECONDARY:
+               if (ld->init_done)
+                       break;
+               mlx5_mp_init_secondary();
+               ret = mlx5_uar_init_secondary();
+               if (ret)
+                       goto error;
+               ++sd->secondary_cnt;
+               ld->init_done = true;
+               break;
+       default:
+               break;
+       }
+       rte_spinlock_unlock(&sd->lock);
        return 0;
+error:
+       switch (rte_eal_process_type()) {
+       case RTE_PROC_PRIMARY:
+               mlx5_uar_uninit_primary();
+               mlx5_mp_uninit_primary();
+               rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", NULL);
+               break;
+       case RTE_PROC_SECONDARY:
+               mlx5_uar_uninit_secondary();
+               mlx5_mp_uninit_secondary();
+               break;
+       default:
+               break;
+       }
+       rte_spinlock_unlock(&sd->lock);
+       mlx5_uninit_shared_data();
+       return -rte_errno;
 }
 
 /**
@@ -744,11 +1125,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
               struct mlx5_dev_config config)
 {
        const struct mlx5_switch_info *switch_info = &spawn->info;
-       struct ibv_device *ibv_dev = spawn->ibv_dev;
-       struct ibv_context *ctx = NULL;
-       struct ibv_device_attr_ex attr;
+       struct mlx5_ibv_shared *sh = NULL;
        struct ibv_port_attr port_attr;
-       struct ibv_pd *pd = NULL;
        struct mlx5dv_context dv_attr = { .comp_mask = 0 };
        struct rte_eth_dev *eth_dev = NULL;
        struct mlx5_priv *priv = NULL;
@@ -802,20 +1180,37 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                rte_errno = EEXIST;
                return NULL;
        }
-       /* Prepare shared data between primary and secondary process. */
-       mlx5_prepare_shared_data();
-       errno = 0;
-       ctx = mlx5_glue->dv_open_device(ibv_dev);
-       if (ctx) {
-               config.devx = 1;
-               DRV_LOG(DEBUG, "DEVX is supported");
-       } else {
-               ctx = mlx5_glue->open_device(ibv_dev);
-               if (!ctx) {
-                       rte_errno = errno ? errno : ENODEV;
+       DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+               eth_dev = rte_eth_dev_attach_secondary(name);
+               if (eth_dev == NULL) {
+                       DRV_LOG(ERR, "can not attach rte ethdev");
+                       rte_errno = ENOMEM;
                        return NULL;
                }
+               eth_dev->device = dpdk_dev;
+               eth_dev->dev_ops = &mlx5_dev_sec_ops;
+               /* Receive command fd from primary process */
+               err = mlx5_mp_req_verbs_cmd_fd(eth_dev);
+               if (err < 0)
+                       return NULL;
+               /* Remap UAR for Tx queues. */
+               err = mlx5_tx_uar_remap(eth_dev, err);
+               if (err)
+                       return NULL;
+               /*
+                * Ethdev pointer is still required as input since
+                * the primary device is not accessible from the
+                * secondary process.
+                */
+               eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
+               eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
+               return eth_dev;
        }
+       sh = mlx5_alloc_shared_ibctx(spawn);
+       if (!sh)
+               return NULL;
+       config.devx = sh->devx;
 #ifdef HAVE_IBV_MLX5_MOD_SWP
        dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
 #endif
@@ -829,7 +1224,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
        dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
 #endif
-       mlx5_glue->dv_query_device(ctx, &dv_attr);
+       mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
        if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
                if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
                        DRV_LOG(DEBUG, "enhanced MPW is supported");
@@ -914,51 +1309,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                " old OFED/rdma-core version or firmware configuration");
 #endif
        config.mpls_en = mpls_en;
-       err = mlx5_glue->query_device_ex(ctx, NULL, &attr);
-       if (err) {
-               DEBUG("ibv_query_device_ex() failed");
-               goto error;
-       }
-       DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
-       if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-               eth_dev = rte_eth_dev_attach_secondary(name);
-               if (eth_dev == NULL) {
-                       DRV_LOG(ERR, "can not attach rte ethdev");
-                       rte_errno = ENOMEM;
-                       err = rte_errno;
-                       goto error;
-               }
-               eth_dev->device = dpdk_dev;
-               eth_dev->dev_ops = &mlx5_dev_sec_ops;
-               err = mlx5_uar_init_secondary(eth_dev);
-               if (err) {
-                       err = rte_errno;
-                       goto error;
-               }
-               /* Receive command fd from primary process */
-               err = mlx5_socket_connect(eth_dev);
-               if (err < 0) {
-                       err = rte_errno;
-                       goto error;
-               }
-               /* Remap UAR for Tx queues. */
-               err = mlx5_tx_uar_remap(eth_dev, err);
-               if (err) {
-                       err = rte_errno;
-                       goto error;
-               }
-               /*
-                * Ethdev pointer is still required as input since
-                * the primary device is not accessible from the
-                * secondary process.
-                */
-               eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
-               eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
-               claim_zero(mlx5_glue->close_device(ctx));
-               return eth_dev;
-       }
        /* Check port status. */
-       err = mlx5_glue->query_port(ctx, spawn->ibv_port, &port_attr);
+       err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr);
        if (err) {
                DRV_LOG(ERR, "port query failed: %s", strerror(err));
                goto error;
@@ -972,13 +1324,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
                        mlx5_glue->port_state_str(port_attr.state),
                        port_attr.state);
-       /* Allocate protection domain. */
-       pd = mlx5_glue->alloc_pd(ctx);
-       if (pd == NULL) {
-               DRV_LOG(ERR, "PD allocation failure");
-               err = ENOMEM;
-               goto error;
-       }
+       /* Allocate private eth device data. */
        priv = rte_zmalloc("ethdev private structure",
                           sizeof(*priv),
                           RTE_CACHE_LINE_SIZE);
@@ -987,13 +1333,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                err = ENOMEM;
                goto error;
        }
-       priv->ctx = ctx;
-       strncpy(priv->ibdev_name, priv->ctx->device->name,
-               sizeof(priv->ibdev_name));
-       strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
-               sizeof(priv->ibdev_path));
-       priv->device_attr = attr;
-       priv->pd = pd;
+       priv->sh = sh;
+       priv->ibv_port = spawn->ibv_port;
        priv->mtu = ETHER_MTU;
 #ifndef RTE_ARCH_64
        /* Initialize UAR access locks for 32bit implementations. */
@@ -1028,22 +1369,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
         * Look for sibling devices in order to reuse their switch domain
         * if any, otherwise allocate one.
         */
-       i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0);
-       if (i > 0) {
-               uint16_t port_id[i];
-
-               i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
-               while (i--) {
-                       const struct mlx5_priv *opriv =
-                               rte_eth_devices[port_id[i]].data->dev_private;
+       RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) {
+               const struct mlx5_priv *opriv =
+                       rte_eth_devices[port_id].data->dev_private;
 
-                       if (!opriv ||
-                           opriv->domain_id ==
-                           RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
-                               continue;
-                       priv->domain_id = opriv->domain_id;
-                       break;
-               }
+               if (!opriv ||
+                       opriv->domain_id ==
+                       RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
+                       continue;
+               priv->domain_id = opriv->domain_id;
+               break;
        }
        if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
                err = rte_eth_switch_domain_alloc(&priv->domain_id);
@@ -1062,7 +1397,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                        strerror(rte_errno));
                goto error;
        }
-       config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
+       config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
+                           IBV_DEVICE_RAW_IP_CSUM);
        DRV_LOG(DEBUG, "checksum offloading is %ssupported",
                (config.hw_csum ? "" : "not "));
 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
@@ -1076,7 +1412,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        }
 #endif
        config.ind_table_max_size =
-               attr.rss_caps.max_rwq_indirection_table_size;
+               sh->device_attr.rss_caps.max_rwq_indirection_table_size;
        /*
         * Remove this check once DPDK supports larger/variable
         * indirection tables.
@@ -1085,18 +1421,18 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
        DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
                config.ind_table_max_size);
-       config.hw_vlan_strip = !!(attr.raw_packet_caps &
+       config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
                                  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
        DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
                (config.hw_vlan_strip ? "" : "not "));
-       config.hw_fcs_strip = !!(attr.raw_packet_caps &
+       config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
                                 IBV_RAW_PACKET_CAP_SCATTER_FCS);
        DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
                (config.hw_fcs_strip ? "" : "not "));
 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
-       hw_padding = !!attr.rx_pad_end_addr_align;
+       hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
-       hw_padding = !!(attr.device_cap_flags_ex &
+       hw_padding = !!(sh->device_attr.device_cap_flags_ex &
                        IBV_DEVICE_PCI_WRITE_END_PADDING);
 #endif
        if (config.hw_padding && !hw_padding) {
@@ -1105,11 +1441,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        } else if (config.hw_padding) {
                DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
        }
-       config.tso = (attr.tso_caps.max_tso > 0 &&
-                     (attr.tso_caps.supported_qpts &
+       config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
+                     (sh->device_attr.tso_caps.supported_qpts &
                       (1 << IBV_QPT_RAW_PACKET)));
        if (config.tso)
-               config.tso_max_payload_sz = attr.tso_caps.max_tso;
+               config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
        /*
         * MPW is disabled by default, while the Enhanced MPW is enabled
         * by default.
@@ -1166,11 +1502,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        priv->dev_data = eth_dev->data;
        eth_dev->data->mac_addrs = priv->mac;
        eth_dev->device = dpdk_dev;
-       err = mlx5_uar_init_primary(eth_dev);
-       if (err) {
-               err = rte_errno;
-               goto error;
-       }
        /* Configure the first MAC address by default. */
        if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
                DRV_LOG(ERR,
@@ -1242,6 +1573,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                        priv->tcf_context = NULL;
                }
        }
+       if (config.dv_flow_en) {
+               err = mlx5_alloc_shared_dr(priv);
+               if (err)
+                       goto error;
+       }
        TAILQ_INIT(&priv->flows);
        TAILQ_INIT(&priv->ctrl_flows);
        /* Hint libmlx5 to use PMD allocator for data plane resources */
@@ -1250,7 +1586,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                .free = &mlx5_free_verbs_buf,
                .data = priv,
        };
-       mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+       mlx5_glue->dv_set_context_attr(sh->ctx,
+                                      MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
                                       (void *)((uintptr_t)&alctr));
        /* Bring Ethernet device up. */
        DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
@@ -1292,6 +1629,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        return eth_dev;
 error:
        if (priv) {
+               if (priv->sh)
+                       mlx5_free_shared_dr(priv);
                if (priv->nl_socket_route >= 0)
                        close(priv->nl_socket_route);
                if (priv->nl_socket_rdma >= 0)
@@ -1304,15 +1643,13 @@ error:
                if (eth_dev != NULL)
                        eth_dev->data->dev_private = NULL;
        }
-       if (pd)
-               claim_zero(mlx5_glue->dealloc_pd(pd));
        if (eth_dev != NULL) {
                /* mac_addrs must not be freed alone because part of dev_private */
                eth_dev->data->mac_addrs = NULL;
                rte_eth_dev_release_port(eth_dev);
        }
-       if (ctx)
-               claim_zero(mlx5_glue->close_device(ctx));
+       if (sh)
+               mlx5_free_shared_ibctx(sh);
        assert(err > 0);
        rte_errno = err;
        return NULL;
@@ -1395,6 +1732,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct mlx5_dev_config dev_config;
        int ret;
 
+       ret = mlx5_init_once();
+       if (ret) {
+               DRV_LOG(ERR, "unable to init PMD global data: %s",
+                       strerror(rte_errno));
+               return -rte_errno;
+       }
        assert(pci_drv == &mlx5_driver);
        errno = 0;
        ibv_list = mlx5_glue->get_device_list(&ret);
@@ -1613,6 +1956,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                .txqs_vec = MLX5_ARG_UNSET,
                .inline_max_packet_sz = MLX5_ARG_UNSET,
                .vf_nl_en = 1,
+               .mr_ext_memseg_en = 1,
                .mprq = {
                        .enabled = 0, /* Disabled by default. */
                        .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,