net/mlx5: remove redundant flag in device config
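
The PMD argument parser now recognizes and skips the "driver" key used
by the new global device syntax, together with the keys handled by the
common mlx5 driver (sys_mem_en, tx_db_nc, mr_ext_memseg_en and the new
mr_mempool_reg_en), so these device arguments are no longer rejected at
the net driver level.

The new mr_mempool_reg_en parameter controls implicit registration of
mempool memory with the device protection domain; when it is disabled,
only Rx mempool destruction is tracked. As an illustration (the PCI
address and the testpmd invocation are hypothetical), implicit
registration could be disabled with either the legacy or the global
device syntax:

    dpdk-testpmd -a 0000:08:00.0,mr_mempool_reg_en=0
    dpdk-testpmd -a bus=pci,addr=0000:08:00.0/class=eth/driver=mlx5,mr_mempool_reg_en=0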

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 96e8d18..8b7629b 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -43,6 +43,9 @@
 
 #define MLX5_ETH_DRIVER_NAME mlx5_eth
 
+/* Driver type key for the new global device syntax. */
+#define MLX5_DRIVER_KEY "driver"
+
 /* Device parameter to enable RX completion queue compression. */
 #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
 
 /* Device parameter to configure allow or prevent duplicate rules pattern. */
 #define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"
 
+/* Device parameter to configure implicit registration of mempool memory. */
+#define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
+
 /* Shared memory between primary and secondary processes. */
 struct mlx5_shared_data *mlx5_shared_data;
 
@@ -399,6 +405,24 @@ mlx5_is_hpf(struct rte_eth_dev *dev)
               MLX5_REPRESENTOR_REPR(-1) == repr;
 }
 
+/**
+ * Decide whether the representor ID is an SF port representor.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   Non-zero if the device is an SF port representor, otherwise 0.
+ */
+bool
+mlx5_is_sf_repr(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);
+
+       return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF;
+}
+
 /**
  * Initialize the ASO aging management structure.
  *
@@ -929,6 +953,36 @@ mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
        prf->obj = NULL;
 }
 
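+/**
+ * Query software parsing offload capabilities reported by the HCA.
+ *
+ * @param attr
+ *   Pointer to the HCA capability attributes.
+ *
+ * @return
+ *   Bitmap of the MLX5_SW_PARSING_*_CAP flags supported by the device.
+ */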
+uint32_t
+mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
+{
+       uint32_t sw_parsing_offloads = 0;
+
+       if (attr->swp) {
+               sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
+               if (attr->swp_csum)
+                       sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;
+
+               if (attr->swp_lso)
+                       sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
+       }
+       return sw_parsing_offloads;
+}
+
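+/**
+ * Query stateless tunneling offload capabilities reported by the HCA.
+ *
+ * @param attr
+ *   Pointer to the HCA capability attributes.
+ *
+ * @return
+ *   Bitmap of the MLX5_TUNNELED_OFFLOADS_*_CAP flags supported by the device.
+ */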
+uint32_t
+mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
+{
+       uint32_t tn_offloads = 0;
+
+       if (attr->tunnel_stateless_vxlan)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
+       if (attr->tunnel_stateless_gre)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
+       if (attr->tunnel_stateless_geneve_rx)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
+       return tn_offloads;
+}
+
 /*
  * Allocate Rx and Tx UARs in a robust fashion.
  * This routine handles the following UAR allocation issues:
@@ -944,7 +998,7 @@ mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
  */
 static int
 mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
-                    const struct mlx5_dev_config *config)
+                    const struct mlx5_common_dev_config *config)
 {
        uint32_t uar_mapping, retry;
        int err = 0;
@@ -1067,6 +1121,141 @@ exit:
        return err;
 }
 
+/**
+ * Unregister the mempool from the protection domain.
+ *
+ * @param sh
+ *   Pointer to the device shared context.
+ * @param mp
+ *   Mempool being unregistered.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
+                                      struct rte_mempool *mp)
+{
+       struct mlx5_mp_id mp_id;
+
+       mlx5_mp_id_init(&mp_id, 0);
+       if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
+               DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
+                       mp->name, sh->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to register mempools
+ * for the protection domain.
+ *
+ * @param mp
+ *   The mempool being walked.
+ * @param arg
+ *   Pointer to the device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+{
+       struct mlx5_dev_ctx_shared *sh = arg;
+       struct mlx5_mp_id mp_id;
+       int ret;
+
+       mlx5_mp_id_init(&mp_id, 0);
+       ret = mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp, &mp_id);
+       if (ret < 0 && rte_errno != EEXIST)
+               DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
+                       mp->name, sh->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to unregister mempools
+ * from the protection domain.
+ *
+ * @param mp
+ *   The mempool being walked.
+ * @param arg
+ *   Pointer to the device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
+{
+       mlx5_dev_ctx_shared_mempool_unregister
+                               ((struct mlx5_dev_ctx_shared *)arg, mp);
+}
+
+/**
+ * Mempool life cycle callback for Ethernet devices.
+ *
+ * @param event
+ *   Mempool life cycle event.
+ * @param mp
+ *   Associated mempool.
+ * @param arg
+ *   Pointer to a device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
+                                    struct rte_mempool *mp, void *arg)
+{
+       struct mlx5_dev_ctx_shared *sh = arg;
+       struct mlx5_mp_id mp_id;
+
+       switch (event) {
+       case RTE_MEMPOOL_EVENT_READY:
+               mlx5_mp_id_init(&mp_id, 0);
+               if (mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp,
+                                            &mp_id) < 0)
+                       DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
+                               mp->name, sh->pd, rte_strerror(rte_errno));
+               break;
+       case RTE_MEMPOOL_EVENT_DESTROY:
+               mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+               break;
+       }
+}
+
+/**
+ * Callback used when implicit mempool registration is disabled
+ * in order to track Rx mempool destruction.
+ *
+ * @param event
+ *   Mempool life cycle event.
+ * @param mp
+ *   An Rx mempool registered explicitly when the port is started.
+ * @param arg
+ *   Pointer to a device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
+                                       struct rte_mempool *mp, void *arg)
+{
+       struct mlx5_dev_ctx_shared *sh = arg;
+
+       if (event == RTE_MEMPOOL_EVENT_DESTROY)
+               mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+}
+
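+/**
+ * Subscribe to the mempool life cycle events for the device shared context.
+ *
+ * Depending on the mr_mempool_reg_en configuration, either register all
+ * existing mempools with the protection domain and watch for new ones,
+ * or only track Rx mempool destruction.
+ *
+ * @param dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   0 on success, a negative value otherwise and rte_errno is set.
+ */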
+int
+mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       int ret;
+
+       /* Check if we only need to track Rx mempool destruction. */
+       if (!sh->cdev->config.mr_mempool_reg_en) {
+               ret = rte_mempool_event_callback_register
+                               (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
+               return ret == 0 || rte_errno == EEXIST ? 0 : ret;
+       }
+       /* Callback for this shared context may be already registered. */
+       ret = rte_mempool_event_callback_register
+                               (mlx5_dev_ctx_shared_mempool_event_cb, sh);
+       if (ret != 0 && rte_errno != EEXIST)
+               return ret;
+       /* Register mempools only once for this shared context. */
+       if (ret == 0)
+               rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
+       return 0;
+}
+
 /**
  * Allocate shared device context. If there is multiport device the
  * master and representors will share this context, if there is single
@@ -1103,7 +1292,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        /* Search for IB context by device name. */
        LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
                if (!strcmp(sh->ibdev_name,
-                       mlx5_os_get_dev_device_name(spawn->phys_dev))) {
+                       mlx5_os_get_ctx_device_name(spawn->ctx))) {
                        sh->refcnt++;
                        goto exit;
                }
@@ -1120,12 +1309,13 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                rte_errno  = ENOMEM;
                goto exit;
        }
-       sh->numa_node = spawn->numa_node;
+       pthread_mutex_init(&sh->txpp.mutex, NULL);
+       sh->numa_node = spawn->cdev->dev->numa_node;
+       sh->cdev = spawn->cdev;
+       sh->devx = sh->cdev->config.devx;
+       sh->ctx = spawn->ctx;
        if (spawn->bond_info)
                sh->bond = *spawn->bond_info;
-       err = mlx5_os_open_device(spawn, config, sh);
-       if (!sh->ctx)
-               goto error;
        err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
        if (err) {
                DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
@@ -1172,7 +1362,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                        err = ENOMEM;
                        goto error;
                }
-               err = mlx5_alloc_rxtx_uars(sh, config);
+               err = mlx5_alloc_rxtx_uars(sh, &sh->cdev->config);
                if (err)
                        goto error;
                MLX5_ASSERT(sh->tx_uar);
@@ -1206,11 +1396,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
                              &sh->share_cache.dereg_mr_cb);
        mlx5_os_dev_shared_handler_install(sh);
-       sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
-       if (!sh->cnt_id_tbl) {
-               err = rte_errno;
-               goto error;
-       }
        if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
                err = mlx5_flow_os_init_workspace_once();
                if (err)
@@ -1234,8 +1419,8 @@ error:
        pthread_mutex_destroy(&sh->txpp.mutex);
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        MLX5_ASSERT(sh);
-       if (sh->cnt_id_tbl)
-               mlx5_l3t_destroy(sh->cnt_id_tbl);
+       if (sh->share_cache.cache.table)
+               mlx5_mr_btree_free(&sh->share_cache.cache);
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
@@ -1264,6 +1449,8 @@ error:
 void
 mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 {
+       int ret;
+
        pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
        /* Check the object presence in the list. */
@@ -1284,6 +1471,15 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (--sh->refcnt)
                goto exit;
+       /* Stop watching for mempool events and unregister all mempools. */
+       ret = rte_mempool_event_callback_unregister
+                               (mlx5_dev_ctx_shared_mempool_event_cb, sh);
+       if (ret < 0 && rte_errno == ENOENT)
+               ret = rte_mempool_event_callback_unregister
+                               (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
+       if (ret == 0)
+               rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
+                                sh);
        /* Remove from memory callback device list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_REMOVE(sh, mem_event_cb);
@@ -1309,10 +1505,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_aso_flow_mtrs_mng_close(sh);
        mlx5_flow_ipool_destroy(sh);
        mlx5_os_dev_shared_handler_uninstall(sh);
-       if (sh->cnt_id_tbl) {
-               mlx5_l3t_destroy(sh->cnt_id_tbl);
-               sh->cnt_id_tbl = NULL;
-       }
        if (sh->tx_uar) {
                mlx5_glue->devx_free_uar(sh->tx_uar);
                sh->tx_uar = NULL;
@@ -1349,6 +1541,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
        if (!sh->flow_tbls)
                return;
        mlx5_hlist_destroy(sh->flow_tbls);
+       sh->flow_tbls = NULL;
 }
 
 /**
@@ -1562,6 +1755,11 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                priv->rxqs_n = 0;
                priv->rxqs = NULL;
        }
+       if (priv->representor) {
+               /* Each representor has a dedicated interrupt handler. */
+               mlx5_free(dev->intr_handle);
+               dev->intr_handle = NULL;
+       }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
                rte_delay_us_sleep(1000);
@@ -1738,6 +1936,7 @@ const struct eth_dev_ops mlx5_dev_sec_ops = {
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
+       .representor_info_get = mlx5_representor_info_get,
        .read_clock = mlx5_txpp_read_clock,
        .rx_queue_start = mlx5_rx_queue_start,
        .rx_queue_stop = mlx5_rx_queue_stop,
@@ -1771,6 +1970,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
+       .representor_info_get = mlx5_representor_info_get,
        .read_clock = mlx5_txpp_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
@@ -1835,7 +2035,10 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
        signed long tmp;
 
        /* No-op, port representors are processed in mlx5_dev_spawn(). */
-       if (!strcmp(MLX5_REPRESENTOR, key))
+       if (!strcmp(MLX5_DRIVER_KEY, key) || !strcmp(MLX5_REPRESENTOR, key) ||
+           !strcmp(MLX5_SYS_MEM_EN, key) || !strcmp(MLX5_TX_DB_NC, key) ||
+           !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) ||
+           !strcmp(MLX5_MR_EXT_MEMSEG_EN, key))
                return 0;
        errno = 0;
        tmp = strtol(val, NULL, 0);
@@ -1888,16 +2091,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
                config->mps = !!tmp;
-       } else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
-               if (tmp != MLX5_TXDB_CACHED &&
-                   tmp != MLX5_TXDB_NCACHED &&
-                   tmp != MLX5_TXDB_HEURISTIC) {
-                       DRV_LOG(ERR, "invalid Tx doorbell "
-                                    "mapping parameter");
-                       rte_errno = EINVAL;
-                       return -rte_errno;
-               }
-               config->dbnc = tmp;
        } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
@@ -1941,8 +2134,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                        config->dv_miss_info = 1;
        } else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
                config->lacp_by_user = !!tmp;
-       } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
-               config->mr_ext_memseg_en = !!tmp;
        } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
                config->max_dump_files_num = tmp;
        } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
@@ -1960,8 +2151,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                        return -rte_errno;
                }
                config->reclaim_mode = tmp;
-       } else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
-               config->sys_mem_en = !!tmp;
        } else if (strcmp(MLX5_DECAP_EN, key) == 0) {
                config->decap_en = !!tmp;
        } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
@@ -1989,6 +2178,7 @@ int
 mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 {
        const char **params = (const char *[]){
+               MLX5_DRIVER_KEY,
                MLX5_RXQ_CQE_COMP_EN,
                MLX5_RXQ_PKT_PAD_EN,
                MLX5_RX_MPRQ_EN,
@@ -2026,6 +2216,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                MLX5_SYS_MEM_EN,
                MLX5_DECAP_EN,
                MLX5_ALLOW_DUPLICATE_PATTERN,
+               MLX5_MR_MEMPOOL_REG_EN,
                NULL,
        };
        struct rte_kvargs *kvlist;
@@ -2272,7 +2463,8 @@ rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
  */
 int
 mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
-                             struct mlx5_dev_config *config)
+                             struct mlx5_dev_config *config,
+                             struct rte_device *dpdk_dev)
 {
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_dev_config *sh_conf = NULL;
@@ -2283,7 +2475,7 @@ mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
        if (sh->refcnt == 1)
                return 0;
        /* Find the device with shared context. */
-       MLX5_ETH_FOREACH_DEV(port_id, NULL) {
+       MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
                struct mlx5_priv *opriv =
                        rte_eth_devices[port_id].data->dev_private;
 
@@ -2335,7 +2527,10 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
                    (dev->device == odev ||
                     (dev->device->driver &&
                     dev->device->driver->name &&
-                    !strcmp(dev->device->driver->name, MLX5_PCI_DRIVER_NAME))))
+                    ((strcmp(dev->device->driver->name,
+                             MLX5_PCI_DRIVER_NAME) == 0) ||
+                     (strcmp(dev->device->driver->name,
+                             MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
                        break;
                port_id++;
        }
@@ -2349,19 +2544,19 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
  *
  * This function removes all Ethernet devices belonging to a given device.
  *
- * @param[in] dev
+ * @param[in] cdev
  *   Pointer to the generic device.
  *
  * @return
  *   0 on success, the function cannot fail.
  */
-static int
-mlx5_net_remove(struct rte_device *dev)
+int
+mlx5_net_remove(struct mlx5_common_device *cdev)
 {
        uint16_t port_id;
        int ret = 0;
 
-       RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
+       RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) {
                /*
                 * mlx5_dev_close() is not registered to secondary process,
                 * call the close function explicitly for secondary process.