#define MLX5_ETH_DRIVER_NAME mlx5_eth
+/* Driver type key for the new global device syntax. */
+#define MLX5_DRIVER_KEY "driver"
+
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
/* Device parameter to allow or prevent insertion of rules with duplicate patterns. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"
+/* Device parameter to configure implicit registration of mempool memory. */
+#define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
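+/*
+ * Illustrative usage (assumption, not part of this patch): both keys go
+ * through the regular kvargs path, e.g. a command line such as
+ *     dpdk-testpmd -a 0000:03:00.0,mr_mempool_reg_en=0
+ * disables implicit mempool registration for that device, while the
+ * "driver" key selects the driver in the new global device syntax, e.g.
+ *     bus=pci,addr=03:00.0/class=eth/driver=mlx5
+ */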
+
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;
MLX5_REPRESENTOR_REPR(-1) == repr;
}
+/**
+ * Decide whether a representor ID designates an SF port representor.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * Non-zero if the port is an SF representor, otherwise 0.
+ */
+bool
+mlx5_is_sf_repr(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);
+
+ return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF;
+}
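+/*
+ * Hypothetical caller sketch (illustration only, not taken from this
+ * patch): code that must treat SF representors specially can branch on
+ * the helper instead of decoding the representor ID by hand:
+ *
+ *	if (mlx5_is_sf_repr(dev))
+ *		DRV_LOG(DEBUG, "port %u is an SF representor",
+ *			dev->data->port_id);
+ */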
+
/**
* Initialize the ASO aging management structure.
*
prf->obj = NULL;
}
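+/**
+ * Translate HCA attributes into a software parsing capability bitmap.
+ *
+ * @param attr
+ *   Pointer to the queried HCA attributes.
+ *
+ * @return
+ *   Bitmap of MLX5_SW_PARSING_* capabilities supported by the device.
+ */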
+uint32_t
+mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
+{
+ uint32_t sw_parsing_offloads = 0;
+
+ if (attr->swp) {
+ sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
+ if (attr->swp_csum)
+ sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;
+ if (attr->swp_lso)
+ sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
+ }
+ return sw_parsing_offloads;
+}
+
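+/**
+ * Translate HCA attributes into a stateless tunneling capability bitmap.
+ *
+ * @param attr
+ *   Pointer to the queried HCA attributes.
+ *
+ * @return
+ *   Bitmap of MLX5_TUNNELED_OFFLOADS_* capabilities supported by the
+ *   device.
+ */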
+uint32_t
+mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
+{
+ uint32_t tn_offloads = 0;
+
+ if (attr->tunnel_stateless_vxlan)
+ tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
+ if (attr->tunnel_stateless_gre)
+ tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
+ if (attr->tunnel_stateless_geneve_rx)
+ tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
+ return tn_offloads;
+}
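+/*
+ * Usage sketch (assumption, not part of this patch): a spawn-time path
+ * can derive both Verbs-independent capability masks in one step, e.g.
+ *
+ *	swp = mlx5_get_supported_sw_parsing_offloads(&hca_attr);
+ *	tunnel = mlx5_get_supported_tunneling_offloads(&hca_attr);
+ *
+ * where hca_attr is the structure filled by the DevX capability query
+ * and the destination variables are hypothetical.
+ */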
+
/*
* Allocate Rx and Tx UARs in a robust fashion.
* This routine handles the following UAR allocation issues:
*/
static int
mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
- const struct mlx5_dev_config *config)
+ const struct mlx5_common_dev_config *config)
{
uint32_t uar_mapping, retry;
int err = 0;
return err;
}
+/**
+ * Unregister the mempool from the protection domain.
+ *
+ * @param sh
+ * Pointer to the device shared context.
+ * @param mp
+ * Mempool being unregistered.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mp_id mp_id;
+
+ mlx5_mp_id_init(&mp_id, 0);
+ if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
+ DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
+ mp->name, sh->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to register mempools
+ * for the protection domain.
+ *
+ * @param mp
+ * The mempool being walked.
+ * @param arg
+ * Pointer to the device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_dev_ctx_shared *sh = arg;
+ struct mlx5_mp_id mp_id;
+ int ret;
+
+ mlx5_mp_id_init(&mp_id, 0);
+ ret = mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp, &mp_id);
+ if (ret < 0 && rte_errno != EEXIST)
+ DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
+ mp->name, sh->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to unregister mempools
+ * from the protection domain.
+ *
+ * @param mp
+ * The mempool being walked.
+ * @param arg
+ * Pointer to the device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
+{
+ mlx5_dev_ctx_shared_mempool_unregister
+ ((struct mlx5_dev_ctx_shared *)arg, mp);
+}
+
+/**
+ * Mempool life cycle callback for Ethernet devices.
+ *
+ * @param event
+ * Mempool life cycle event.
+ * @param mp
+ * Associated mempool.
+ * @param arg
+ * Pointer to a device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
+ struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_dev_ctx_shared *sh = arg;
+ struct mlx5_mp_id mp_id;
+
+ switch (event) {
+ case RTE_MEMPOOL_EVENT_READY:
+ mlx5_mp_id_init(&mp_id, 0);
+ if (mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp,
+ &mp_id) < 0)
+ DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
+ mp->name, sh->pd, rte_strerror(rte_errno));
+ break;
+ case RTE_MEMPOOL_EVENT_DESTROY:
+ mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+ break;
+ }
+}
+
+/**
+ * Callback used when implicit mempool registration is disabled
+ * in order to track Rx mempool destruction.
+ *
+ * @param event
+ * Mempool life cycle event.
+ * @param mp
+ * An Rx mempool registered explicitly when the port is started.
+ * @param arg
+ * Pointer to a device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
+ struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_dev_ctx_shared *sh = arg;
+
+ if (event == RTE_MEMPOOL_EVENT_DESTROY)
+ mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+}
+
+int
+mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ int ret;
+
+ /* Check if we only need to track Rx mempool destruction. */
+ if (!sh->cdev->config.mr_mempool_reg_en) {
+ ret = rte_mempool_event_callback_register
+ (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
+ return ret == 0 || rte_errno == EEXIST ? 0 : ret;
+ }
+ /* Callback for this shared context may already be registered. */
+ ret = rte_mempool_event_callback_register
+ (mlx5_dev_ctx_shared_mempool_event_cb, sh);
+ if (ret != 0 && rte_errno != EEXIST)
+ return ret;
+ /* Register mempools only once for this shared context. */
+ if (ret == 0)
+ rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
+ return 0;
+}
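+/*
+ * Caller sketch (assumption, not part of this patch): the port start
+ * path is expected to invoke this once per start, e.g.
+ *
+ *	ret = mlx5_dev_ctx_shared_mempool_subscribe(dev);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ * Repeated calls are harmless: callback registration tolerates EEXIST
+ * and the initial rte_mempool_walk() runs only on first registration.
+ */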
+
/**
* Allocate the shared device context. If there is a multiport device, the
* master and representors will share this context; if there is a single
/* Search for IB context by device name. */
LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
if (!strcmp(sh->ibdev_name,
- mlx5_os_get_dev_device_name(spawn->phys_dev))) {
+ mlx5_os_get_ctx_device_name(spawn->ctx))) {
sh->refcnt++;
goto exit;
}
rte_errno = ENOMEM;
goto exit;
}
- sh->numa_node = spawn->numa_node;
+ pthread_mutex_init(&sh->txpp.mutex, NULL);
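+ /* Wire the shared context to the common device resolved at probe time. */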
+ sh->numa_node = spawn->cdev->dev->numa_node;
+ sh->cdev = spawn->cdev;
+ sh->devx = sh->cdev->config.devx;
+ sh->ctx = spawn->ctx;
if (spawn->bond_info)
sh->bond = *spawn->bond_info;
- err = mlx5_os_open_device(spawn, config, sh);
- if (!sh->ctx)
- goto error;
err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
if (err) {
DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
err = ENOMEM;
goto error;
}
- err = mlx5_alloc_rxtx_uars(sh, config);
+ err = mlx5_alloc_rxtx_uars(sh, &sh->cdev->config);
if (err)
goto error;
MLX5_ASSERT(sh->tx_uar);
mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
&sh->share_cache.dereg_mr_cb);
mlx5_os_dev_shared_handler_install(sh);
- sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
- if (!sh->cnt_id_tbl) {
- err = rte_errno;
- goto error;
- }
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
err = mlx5_flow_os_init_workspace_once();
if (err)
pthread_mutex_destroy(&sh->txpp.mutex);
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
- if (sh->cnt_id_tbl)
- mlx5_l3t_destroy(sh->cnt_id_tbl);
+ if (sh->share_cache.cache.table)
+ mlx5_mr_btree_free(&sh->share_cache.cache);
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
+ int ret;
+
pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (--sh->refcnt)
goto exit;
+ /* Stop watching for mempool events and unregister all mempools. */
+ ret = rte_mempool_event_callback_unregister
+ (mlx5_dev_ctx_shared_mempool_event_cb, sh);
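+ /*
+ * ENOENT means the full life cycle callback was never installed,
+ * i.e. implicit registration was disabled, so remove the Rx-only
+ * destruction tracker instead.
+ */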
+ if (ret < 0 && rte_errno == ENOENT)
+ ret = rte_mempool_event_callback_unregister
+ (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
+ if (ret == 0)
+ rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
+ sh);
/* Remove from memory callback device list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
LIST_REMOVE(sh, mem_event_cb);
mlx5_aso_flow_mtrs_mng_close(sh);
mlx5_flow_ipool_destroy(sh);
mlx5_os_dev_shared_handler_uninstall(sh);
- if (sh->cnt_id_tbl) {
- mlx5_l3t_destroy(sh->cnt_id_tbl);
- sh->cnt_id_tbl = NULL;
- }
if (sh->tx_uar) {
mlx5_glue->devx_free_uar(sh->tx_uar);
sh->tx_uar = NULL;
if (!sh->flow_tbls)
return;
mlx5_hlist_destroy(sh->flow_tbls);
+ sh->flow_tbls = NULL;
}
/**
priv->rxqs_n = 0;
priv->rxqs = NULL;
}
+ if (priv->representor) {
+ /* Each representor has a dedicated interrupt handler. */
+ mlx5_free(dev->intr_handle);
+ dev->intr_handle = NULL;
+ }
if (priv->txqs != NULL) {
/* XXX race condition if mlx5_tx_burst() is still running. */
rte_delay_us_sleep(1000);
.xstats_get_names = mlx5_xstats_get_names,
.fw_version_get = mlx5_fw_version_get,
.dev_infos_get = mlx5_dev_infos_get,
+ .representor_info_get = mlx5_representor_info_get,
.read_clock = mlx5_txpp_read_clock,
.rx_queue_start = mlx5_rx_queue_start,
.rx_queue_stop = mlx5_rx_queue_stop,
.xstats_get_names = mlx5_xstats_get_names,
.fw_version_get = mlx5_fw_version_get,
.dev_infos_get = mlx5_dev_infos_get,
+ .representor_info_get = mlx5_representor_info_get,
.read_clock = mlx5_txpp_read_clock,
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
signed long tmp;
/*
 * No-op: port representors are processed in mlx5_dev_spawn() and the
 * remaining keys are global device parameters handled outside this
 * net driver.
 */
- if (!strcmp(MLX5_REPRESENTOR, key))
+ if (!strcmp(MLX5_DRIVER_KEY, key) || !strcmp(MLX5_REPRESENTOR, key) ||
+ !strcmp(MLX5_SYS_MEM_EN, key) || !strcmp(MLX5_TX_DB_NC, key) ||
+ !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) ||
+ !strcmp(MLX5_MR_EXT_MEMSEG_EN, key))
return 0;
errno = 0;
tmp = strtol(val, NULL, 0);
DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
config->mps = !!tmp;
- } else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
- if (tmp != MLX5_TXDB_CACHED &&
- tmp != MLX5_TXDB_NCACHED &&
- tmp != MLX5_TXDB_HEURISTIC) {
- DRV_LOG(ERR, "invalid Tx doorbell "
- "mapping parameter");
- rte_errno = EINVAL;
- return -rte_errno;
- }
- config->dbnc = tmp;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
config->dv_miss_info = 1;
} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
config->lacp_by_user = !!tmp;
- } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
- config->mr_ext_memseg_en = !!tmp;
} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
config->max_dump_files_num = tmp;
} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
return -rte_errno;
}
config->reclaim_mode = tmp;
- } else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
- config->sys_mem_en = !!tmp;
} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
config->decap_en = !!tmp;
} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
const char **params = (const char *[]){
+ MLX5_DRIVER_KEY,
MLX5_RXQ_CQE_COMP_EN,
MLX5_RXQ_PKT_PAD_EN,
MLX5_RX_MPRQ_EN,
MLX5_SYS_MEM_EN,
MLX5_DECAP_EN,
MLX5_ALLOW_DUPLICATE_PATTERN,
+ MLX5_MR_MEMPOOL_REG_EN,
NULL,
};
struct rte_kvargs *kvlist;
*/
int
mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
- struct mlx5_dev_config *config)
+ struct mlx5_dev_config *config,
+ struct rte_device *dpdk_dev)
{
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_dev_config *sh_conf = NULL;
if (sh->refcnt == 1)
return 0;
/* Find the device with shared context. */
- MLX5_ETH_FOREACH_DEV(port_id, NULL) {
+ MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
(dev->device == odev ||
(dev->device->driver &&
dev->device->driver->name &&
- !strcmp(dev->device->driver->name, MLX5_PCI_DRIVER_NAME))))
+ ((strcmp(dev->device->driver->name,
+ MLX5_PCI_DRIVER_NAME) == 0) ||
+ (strcmp(dev->device->driver->name,
+ MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
break;
port_id++;
}
*
* This function removes all Ethernet devices belonging to a given device.
*
- * @param[in] dev
+ * @param[in] cdev
* Pointer to the mlx5 common device.
*
* @return
* 0 on success, a negative errno value otherwise.
*/
-static int
-mlx5_net_remove(struct rte_device *dev)
+int
+mlx5_net_remove(struct mlx5_common_device *cdev)
{
uint16_t port_id;
int ret = 0;
- RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
+ RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) {
/*
* mlx5_dev_close() is not registered for secondary processes,
* so call the close function explicitly in that case.