#include <sys/mman.h>
#include <linux/rtnetlink.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
+#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
+#include <mlx5_common_pci.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
+/*
+ * Device parameter to enable Tx scheduling on timestamps
+ * and specify the packet pacing granularity in nanoseconds.
+ */
+#define MLX5_TX_PP "tx_pp"
+
+/*
+ * Device parameter to specify skew in nanoseconds on Tx datapath,
+ * it represents the time between SQ start WQE processing and
+ * appearing actual packet data on the wire.
+ */
+#define MLX5_TX_SKEW "tx_skew"
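+/*
+ * Illustrative only (hypothetical PCI address): both parameters are
+ * ordinary devargs, e.g.
+ *     -w 0000:03:00.0,tx_pp=500,tx_skew=20
+ * requests 500 ns scheduling granularity and compensates 20 ns of
+ * Tx path skew.
+ */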
+
/*
* Device parameter to enable hardware Tx vector.
* Deprecated, ignored (no vectorized Tx routines anymore).
*/
#define MLX5_TX_VEC_EN "tx_vec_en"
/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"
+/* Device parameter to let the user manage LACP traffic of bonded device. */
+#define MLX5_LACP_BY_USER "lacp_by_user"
+
/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"
/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"
-static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
+/* The default memory allocator used in PMD. */
+#define MLX5_SYS_MEM_EN "sys_mem_en"
+/* Decap will be used or not. */
+#define MLX5_DECAP_EN "decap_en"
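+/*
+ * A rough sketch of the dispatch "sys_mem_en" implies (MLX5_MEM_SYS is
+ * an assumption here; MLX5_MEM_RTE/MLX5_MEM_ZERO are used below):
+ * mlx5_malloc() calls without MLX5_MEM_RTE/MLX5_MEM_SYS follow the
+ * default allocator this devarg selects, while either flag forces
+ * rte_malloc() or the system malloc() respectively.
+ */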
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;
-/* Spinlock for mlx5_shared_data allocation. */
-static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
-
-/* Process local data for secondary processes. */
-static struct mlx5_local_data mlx5_local_data;
/** Driver-specific log messages type. */
int mlx5_logtype;
-static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
-static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
+ LIST_HEAD_INITIALIZER();
+static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
+static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
{
.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_encap_decap_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_push_vlan_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_tag_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_port_id_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_jump_ipool",
},
#endif
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_meter_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_mcp_ipool",
},
{
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_hrxq_ipool",
},
{
- .size = sizeof(struct mlx5_flow_handle),
+ /*
+ * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
+ * It is set at run time according to the PCI function configuration.
+ */
+ .size = 0,
.trunk_size = 64,
.grow_trunk = 3,
.grow_shift = 2,
.need_lock = 0,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "mlx5_flow_handle_ipool",
},
{
.trunk_size = 4096,
.need_lock = 1,
.release_mem_en = 1,
- .malloc = rte_malloc_socket,
- .free = rte_free,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
.type = "rte_flow_ipool",
},
};
struct mlx5_flow_id_pool *pool;
void *mem;
- pool = rte_zmalloc("id pool allocation", sizeof(*pool),
- RTE_CACHE_LINE_SIZE);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!pool) {
DRV_LOG(ERR, "can't allocate id pool");
rte_errno = ENOMEM;
return NULL;
}
- mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
+ mem = mlx5_malloc(MLX5_MEM_ZERO,
+ MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
rte_errno = ENOMEM;
pool->max_id = max_id;
return pool;
error:
- rte_free(pool);
+ mlx5_free(pool);
return NULL;
}
void
mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
{
- rte_free(pool->free_arr);
- rte_free(pool);
+ mlx5_free(pool->free_arr);
+ mlx5_free(pool);
}
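+/*
+ * A hedged usage sketch; mlx5_flow_id_get()/mlx5_flow_id_release()
+ * are assumed to be the companion accessors of this pool:
+ *
+ *	uint32_t id;
+ *
+ *	if (!mlx5_flow_id_get(pool, &id)) {
+ *		... use id, e.g. as a hairpin flow identifier ...
+ *		mlx5_flow_id_release(pool, id);
+ *	}
+ */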
/**
size = pool->curr - pool->free_arr;
size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
MLX5_ASSERT(size2 > size);
- mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
+ mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
+ SOCKET_ID_ANY);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
rte_errno = ENOMEM;
return -rte_errno;
}
memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
- rte_free(pool->free_arr);
+ mlx5_free(pool->free_arr);
pool->free_arr = mem;
pool->curr = pool->free_arr + size;
pool->last = pool->free_arr + size2;
memset(&sh->cmng, 0, sizeof(sh->cmng));
TAILQ_INIT(&sh->cmng.flow_counters);
for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
+ sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET;
+ sh->cmng.ccont[i].max_id = -1;
+ sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID;
TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
rte_spinlock_init(&sh->cmng.ccont[i].resize_sl);
+ TAILQ_INIT(&sh->cmng.ccont[i].counters);
+ rte_spinlock_init(&sh->cmng.ccont[i].csl);
}
}
LIST_REMOVE(mng, next);
claim_zero(mlx5_devx_cmd_destroy(mng->dm));
claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
- rte_free(mem);
+ mlx5_free(mem);
}
/**
(pool, j)->dcs));
}
TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
- rte_free(pool);
+ mlx5_free(pool);
pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
}
- rte_free(sh->cmng.ccont[i].pools);
+ mlx5_free(sh->cmng.ccont[i].pools);
}
mng = LIST_FIRST(&sh->cmng.mem_mngs);
while (mng) {
*/
static void
mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
- const struct mlx5_dev_config *config __rte_unused)
+ const struct mlx5_dev_config *config)
{
uint8_t i;
+ struct mlx5_indexed_pool_config cfg;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- /*
- * While DV is supported, user chooses the verbs mode,
- * the mlx5 flow handle size is different with the
- * MLX5_FLOW_HANDLE_VERBS_SIZE.
- */
- if (!config->dv_flow_en)
- mlx5_ipool_cfg[MLX5_IPOOL_MLX5_FLOW].size =
- MLX5_FLOW_HANDLE_VERBS_SIZE;
-#endif
for (i = 0; i < MLX5_IPOOL_MAX; ++i) {
+ cfg = mlx5_ipool_cfg[i];
+ switch (i) {
+ default:
+ break;
+ /*
+ * Set MLX5_IPOOL_MLX5_FLOW ipool size
+ * according to PCI function flow configuration.
+ */
+ case MLX5_IPOOL_MLX5_FLOW:
+ cfg.size = config->dv_flow_en ?
+ sizeof(struct mlx5_flow_handle) :
+ MLX5_FLOW_HANDLE_VERBS_SIZE;
+ break;
+ }
if (config->reclaim_mode)
- mlx5_ipool_cfg[i].release_mem_en = 1;
- sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]);
+ cfg.release_mem_en = 1;
+ sh->ipool[i] = mlx5_ipool_create(&cfg);
}
}
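+/*
+ * A hedged usage sketch (mlx5_ipool_zmalloc()/mlx5_ipool_free() as
+ * assumed from mlx5_utils.h): entries are then taken from the indexed
+ * pools by a 32-bit index, e.g. for a flow handle:
+ *
+ *	uint32_t idx;
+ *	struct mlx5_flow_handle *h =
+ *		mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_MLX5_FLOW], &idx);
+ *
+ *	if (h) {
+ *		... use h, keep idx in the flow ...
+ *		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_MLX5_FLOW], idx);
+ *	}
+ */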
mlx5_ipool_destroy(sh->ipool[i]);
}
+/*
+ * Check if dynamic flex parser for eCPRI already exists.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * true if it exists, false otherwise.
+ */
+bool
+mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flex_parser_profiles *prf =
+ &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+
+ return !!prf->obj;
+}
+
+/*
+ * Allocate a flex parser for eCPRI. Once created, the parser-related
+ * resources are held until the device is closed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flex_parser_profiles *prf =
+ &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+ struct mlx5_devx_graph_node_attr node = {
+ .modify_field_select = 0,
+ };
+ uint32_t ids[8];
+ int ret;
+
+ if (!priv->config.hca_attr.parse_graph_flex_node) {
+ DRV_LOG(ERR, "Dynamic flex parser is not supported "
+ "for device %s.", priv->dev_data->name);
+ return -ENOTSUP;
+ }
+ node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
+ /* 8 bytes now: 4B common header + 4B message body header. */
+ node.header_length_base_value = 0x8;
+ /* After MAC layer: Ether / VLAN. */
+ node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
+ /* The condition value to compare in L2 is the eCPRI ethertype 0xAEFE. */
+ node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
+ /* Sample #0: type in common header. */
+ node.sample[0].flow_match_sample_en = 1;
+ /* Fixed offset. */
+ node.sample[0].flow_match_sample_offset_mode = 0x0;
+ /* Only the 2nd byte will be used. */
+ node.sample[0].flow_match_sample_field_base_offset = 0x0;
+ /* Sample #1: message payload. */
+ node.sample[1].flow_match_sample_en = 1;
+ /* Fixed offset. */
+ node.sample[1].flow_match_sample_offset_mode = 0x0;
+ /*
+ * Only the first two bytes will be used right now, and the offset
+ * starts right after the common header, which is one DW (u32) long.
+ */
+ node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
+ prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);
+ if (!prf->obj) {
+ DRV_LOG(ERR, "Failed to create flex parser node object.");
+ return (rte_errno == 0) ? -ENODEV : -rte_errno;
+ }
+ prf->num = 2;
+ ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to query sample IDs.");
+ return (rte_errno == 0) ? -ENODEV : -rte_errno;
+ }
+ prf->offset[0] = 0x0;
+ prf->offset[1] = sizeof(uint32_t);
+ prf->ids[0] = ids[0];
+ prf->ids[1] = ids[1];
+ return 0;
+}
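+/*
+ * Hypothetical caller pattern, creating the parser lazily on the
+ * first eCPRI flow rule:
+ *
+ *	if (!mlx5_flex_parser_ecpri_exist(dev)) {
+ *		int ret = mlx5_flex_parser_ecpri_alloc(dev);
+ *
+ *		if (ret)
+ *			return ret;
+ *	}
+ */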
+
+/*
+ * Destroy the flex parser node, including the parser itself, input / output
+ * arcs and DW samples. The resources can then be reused.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flex_parser_profiles *prf =
+ &priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+
+ if (prf->obj)
+ mlx5_devx_cmd_destroy(prf->obj);
+ prf->obj = NULL;
+}
+
+/*
+ * Allocate Rx and Tx UARs in a robust fashion.
+ * This routine handles the following UAR allocation issues:
+ *
+ * - tries to allocate the UAR with the most appropriate memory
+ * mapping type from the ones supported by the host
+ *
+ * - tries to allocate the UAR with non-NULL base address
+ * OFED 5.0.x and upstream rdma-core before v29 returned NULL as the
+ * UAR base address if the UAR was not the first object in the UAR page.
+ * This caused a PMD failure, so we should request another UAR until
+ * we get the first one with a non-NULL base address returned.
+ */
+static int
+mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
+ const struct mlx5_dev_config *config)
+{
+ uint32_t uar_mapping, retry;
+ int err = 0;
+
+ for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+ /* Control the mapping type according to the settings. */
+ uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ?
+ MLX5DV_UAR_ALLOC_TYPE_NC :
+ MLX5DV_UAR_ALLOC_TYPE_BF;
+#else
+ RTE_SET_USED(config);
+ /*
+ * It seems we have no way to control the memory mapping type
+ * for the UAR, so the default "Write-Combining" type is assumed.
+ * The UAR initialization on queue creation queries the
+ * actual mapping type chosen by Verbs/kernel and sets up the
+ * PMD datapath accordingly.
+ */
+ uar_mapping = 0;
+#endif
+ sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, uar_mapping);
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+ if (!sh->tx_uar &&
+ uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
+ if (config->dbnc == MLX5_TXDB_CACHED ||
+ config->dbnc == MLX5_TXDB_HEURISTIC)
+ DRV_LOG(WARNING, "Devarg tx_db_nc setting "
+ "is not supported by DevX");
+ /*
+ * In some environments like virtual machines
+ * the Write-Combining mapping might not be supported
+ * and UAR allocation fails. We try the "Non-Cached"
+ * mapping for this case. The tx_burst routines take
+ * the UAR mapping type into account on UAR setup
+ * on queue creation.
+ */
+ DRV_LOG(WARNING, "Failed to allocate Tx DevX UAR (BF)");
+ uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
+ sh->tx_uar = mlx5_glue->devx_alloc_uar
+ (sh->ctx, uar_mapping);
+ } else if (!sh->tx_uar &&
+ uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
+ if (config->dbnc == MLX5_TXDB_NCACHED)
+ DRV_LOG(WARNING, "Devarg tx_db_nc settings "
+ "is not supported by DevX");
+ /*
+ * If Verbs/kernel does not support "Non-Cached",
+ * try the "Write-Combining".
+ */
+ DRV_LOG(WARNING, "Failed to allocate Tx DevX UAR (NC)");
+ uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
+ sh->tx_uar = mlx5_glue->devx_alloc_uar
+ (sh->ctx, uar_mapping);
+ }
+#endif
+ if (!sh->tx_uar) {
+ DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)");
+ err = ENOMEM;
+ goto exit;
+ }
+ if (sh->tx_uar->base_addr)
+ break;
+ /*
+ * The UARs are allocated by rdma_core within the
+ * IB device context; on context closure all UARs
+ * will be freed, so there should be no memory/object leakage.
+ */
+ DRV_LOG(WARNING, "Retrying to allocate Tx DevX UAR");
+ sh->tx_uar = NULL;
+ }
+ /* Check whether we finally succeeded with valid UAR allocation. */
+ if (!sh->tx_uar) {
+ DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)");
+ err = ENOMEM;
+ goto exit;
+ }
+ for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
+ uar_mapping = 0;
+ sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
+ (sh->ctx, uar_mapping);
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+ if (!sh->devx_rx_uar &&
+ uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
+ /*
+ * Rx UAR is used to control interrupts only,
+ * so there should be no noticeable datapath impact;
+ * we can try the "Non-Cached" mapping safely.
+ */
+ DRV_LOG(WARNING, "Failed to allocate Rx DevX UAR (BF)");
+ uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
+ sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
+ (sh->ctx, uar_mapping);
+ }
+#endif
+ if (!sh->devx_rx_uar) {
+ DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)");
+ err = ENOMEM;
+ goto exit;
+ }
+ if (sh->devx_rx_uar->base_addr)
+ break;
+ /*
+ * The UARs are allocated by rdma_core within the
+ * IB device context; on context closure all UARs
+ * will be freed, so there should be no memory/object leakage.
+ */
+ DRV_LOG(WARNING, "Retrying to allocate Rx DevX UAR");
+ sh->devx_rx_uar = NULL;
+ }
+ /* Check whether we finally succeeded with valid UAR allocation. */
+ if (!sh->devx_rx_uar) {
+ DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)");
+ err = ENOMEM;
+ }
+exit:
+ return err;
+}
+
/**
- * Allocate shared IB device context. If there is multiport device the
+ * Allocate shared device context. If there is a multiport device the
* master and representors will share this context; if there is a single
- * port dedicated IB device, the context will be used by only given
+ * port dedicated device, the context will be used only by the given
* port due to unification.
*
- * Routine first searches the context for the specified IB device name,
+ * Routine first searches the context for the specified device name,
* if found, the shared context is assumed and its reference counter is
* incremented. If no context is found, a new one is created and
- * IB device context and parameters.
+ * initialized with the specified device context and parameters.
*
* @param[in] spawn
- * Pointer to the IB device attributes (name, port, etc).
+ * Pointer to the device attributes (name, port, etc).
* @param[in] config
* Pointer to device configuration structure.
*
* otherwise NULL and rte_errno is set.
*/
struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
- const struct mlx5_dev_config *config)
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+ const struct mlx5_dev_config *config)
{
struct mlx5_dev_ctx_shared *sh;
int err = 0;
MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- pthread_mutex_lock(&mlx5_ibv_list_mutex);
+ pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
/* Search for IB context by device name. */
- LIST_FOREACH(sh, &mlx5_ibv_list, next) {
- if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
+ LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
+ if (!strcmp(sh->ibdev_name,
+ mlx5_os_get_dev_device_name(spawn->phys_dev))) {
sh->refcnt++;
goto exit;
}
}
/* No device found, we have to create new shared context. */
MLX5_ASSERT(spawn->max_port);
- sh = rte_zmalloc("ethdev shared ib context",
+ sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
sizeof(struct mlx5_dev_ctx_shared) +
spawn->max_port *
- sizeof(struct mlx5_ibv_shared_port),
- RTE_CACHE_LINE_SIZE);
+ sizeof(struct mlx5_dev_shared_port),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!sh) {
DRV_LOG(ERR, "shared context allocation failure");
rte_errno = ENOMEM;
err = ENOMEM;
goto error;
}
+ err = mlx5_alloc_rxtx_uars(sh, config);
+ if (err)
+ goto error;
+ MLX5_ASSERT(sh->tx_uar && sh->tx_uar->base_addr);
+ MLX5_ASSERT(sh->devx_rx_uar && sh->devx_rx_uar->base_addr);
}
sh->flow_id_pool = mlx5_flow_id_pool_alloc
((1 << HAIRPIN_FLOW_ID_BITS) - 1);
err = ENOMEM;
goto error;
}
+#ifndef RTE_ARCH_64
+ /* Initialize UAR access locks for 32bit implementations. */
+ rte_spinlock_init(&sh->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&sh->uar_lock[i]);
+#endif
/*
* Once the device is added to the list of memory event
* callback, its global MR cache table cannot be expanded
err = rte_errno;
goto error;
}
+ mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
+ &sh->share_cache.dereg_mr_cb);
mlx5_os_dev_shared_handler_install(sh);
+ sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+ if (!sh->cnt_id_tbl) {
+ err = rte_errno;
+ goto error;
+ }
mlx5_flow_aging_init(sh);
mlx5_flow_counters_mng_init(sh);
mlx5_flow_ipool_create(sh, config);
sh, mem_event_cb);
rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
/* Add context to the global device list. */
- LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
+ LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
exit:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_destroy(&sh->txpp.mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
+ if (sh->cnt_id_tbl)
+ mlx5_l3t_destroy(sh->cnt_id_tbl);
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
claim_zero(mlx5_devx_cmd_destroy(sh->td));
+ if (sh->devx_rx_uar)
+ mlx5_glue->devx_free_uar(sh->devx_rx_uar);
+ if (sh->tx_uar)
+ mlx5_glue->devx_free_uar(sh->tx_uar);
if (sh->pd)
claim_zero(mlx5_glue->dealloc_pd(sh->pd));
if (sh->ctx)
claim_zero(mlx5_glue->close_device(sh->ctx));
if (sh->flow_id_pool)
mlx5_flow_id_pool_release(sh->flow_id_pool);
- rte_free(sh);
+ mlx5_free(sh);
MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;
* Pointer to mlx5_dev_ctx_shared object to free
*/
void
-mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh)
+mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
- pthread_mutex_lock(&mlx5_ibv_list_mutex);
+ pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
struct mlx5_dev_ctx_shared *lctx;
- LIST_FOREACH(lctx, &mlx5_ibv_list, next)
+ LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
if (lctx == sh)
break;
MLX5_ASSERT(lctx);
mlx5_mr_release_cache(&sh->share_cache);
/* Remove context from the global device list. */
LIST_REMOVE(sh, next);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
/*
* Ensure there is no async event handler installed.
* Only primary process handles async device events.
mlx5_flow_counters_mng_close(sh);
mlx5_flow_ipool_destroy(sh);
mlx5_os_dev_shared_handler_uninstall(sh);
+ if (sh->cnt_id_tbl) {
+ mlx5_l3t_destroy(sh->cnt_id_tbl);
+ sh->cnt_id_tbl = NULL;
+ }
+ if (sh->tx_uar) {
+ mlx5_glue->devx_free_uar(sh->tx_uar);
+ sh->tx_uar = NULL;
+ }
if (sh->pd)
claim_zero(mlx5_glue->dealloc_pd(sh->pd));
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
claim_zero(mlx5_devx_cmd_destroy(sh->td));
+ if (sh->devx_rx_uar)
+ mlx5_glue->devx_free_uar(sh->devx_rx_uar);
if (sh->ctx)
claim_zero(mlx5_glue->close_device(sh->ctx));
if (sh->flow_id_pool)
mlx5_flow_id_pool_release(sh->flow_id_pool);
- rte_free(sh);
+ pthread_mutex_destroy(&sh->txpp.mutex);
+ mlx5_free(sh);
+ return;
exit:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}
/**
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
table_key.direction = 1;
pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
table_key.direction = 0;
table_key.domain = 1;
entry);
MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
- rte_free(tbl_data);
+ mlx5_free(tbl_data);
}
mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
}
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
if (!sh->flow_tbls) {
- DRV_LOG(ERR, "flow tables with hash creation failed.\n");
+ DRV_LOG(ERR, "flow tables with hash creation failed.");
err = ENOMEM;
return err;
}
.direction = 0,
}
};
- struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
- sizeof(*tbl_data), 0);
+ struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
rte_atomic32_init(&tbl_data->tbl.refcnt);
rte_atomic32_inc(&tbl_data->tbl.refcnt);
table_key.direction = 1;
- tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+ tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
goto error;
rte_atomic32_inc(&tbl_data->tbl.refcnt);
table_key.direction = 0;
table_key.domain = 1;
- tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+ tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+ SOCKET_ID_ANY);
if (!tbl_data) {
err = ENOMEM;
goto error;
return err;
}
-/**
- * Initialize shared data between primary and secondary process.
- *
- * A memzone is reserved by primary process and secondary processes attach to
- * the memzone.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_init_shared_data(void)
-{
- const struct rte_memzone *mz;
- int ret = 0;
-
- rte_spinlock_lock(&mlx5_shared_data_lock);
- if (mlx5_shared_data == NULL) {
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- /* Allocate shared memory. */
- mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
- sizeof(*mlx5_shared_data),
- SOCKET_ID_ANY, 0);
- if (mz == NULL) {
- DRV_LOG(ERR,
- "Cannot allocate mlx5 shared data");
- ret = -rte_errno;
- goto error;
- }
- mlx5_shared_data = mz->addr;
- memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
- rte_spinlock_init(&mlx5_shared_data->lock);
- } else {
- /* Lookup allocated shared memory. */
- mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
- if (mz == NULL) {
- DRV_LOG(ERR,
- "Cannot attach mlx5 shared data");
- ret = -rte_errno;
- goto error;
- }
- mlx5_shared_data = mz->addr;
- memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
- }
- }
-error:
- rte_spinlock_unlock(&mlx5_shared_data_lock);
- return ret;
-}
-
/**
* Retrieve integer value from environment variable.
*
*/
ppriv_size =
sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
- ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
- RTE_CACHE_LINE_SIZE, dev->device->numa_node);
+ ppriv = mlx5_malloc(MLX5_MEM_RTE, ppriv_size, RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
if (!ppriv) {
rte_errno = ENOMEM;
return -rte_errno;
{
if (!dev->process_private)
return;
- rte_free(dev->process_private);
+ mlx5_free(dev->process_private);
dev->process_private = NULL;
}
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
/* Disable datapath on secondary process. */
- mlx5_mp_req_stop_rxtx(dev);
+ mlx5_mp_os_req_stop_rxtx(dev);
+ /* Free the eCPRI flex parser resource. */
+ mlx5_flex_parser_ecpri_release(dev);
if (priv->rxqs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
mlx5_mprq_free_mp(dev);
mlx5_os_free_shared_dr(priv);
if (priv->rss_conf.rss_key != NULL)
- rte_free(priv->rss_conf.rss_key);
+ mlx5_free(priv->rss_conf.rss_key);
if (priv->reta_idx != NULL)
- rte_free(priv->reta_idx);
+ mlx5_free(priv->reta_idx);
if (priv->config.vf)
mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
dev->data->mac_addrs,
* mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving the
* ifindex if Netlink fails.
*/
- mlx5_free_shared_ibctx(priv->sh);
+ mlx5_free_shared_dev_ctx(priv->sh);
if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
unsigned int c = 0;
uint16_t port_id;
dev->data->mac_addrs = NULL;
}
-const struct eth_dev_ops mlx5_dev_ops = {
- .dev_configure = mlx5_dev_configure,
- .dev_start = mlx5_dev_start,
- .dev_stop = mlx5_dev_stop,
- .dev_set_link_down = mlx5_set_link_down,
- .dev_set_link_up = mlx5_set_link_up,
- .dev_close = mlx5_dev_close,
- .promiscuous_enable = mlx5_promiscuous_enable,
- .promiscuous_disable = mlx5_promiscuous_disable,
- .allmulticast_enable = mlx5_allmulticast_enable,
- .allmulticast_disable = mlx5_allmulticast_disable,
- .link_update = mlx5_link_update,
- .stats_get = mlx5_stats_get,
- .stats_reset = mlx5_stats_reset,
- .xstats_get = mlx5_xstats_get,
- .xstats_reset = mlx5_xstats_reset,
- .xstats_get_names = mlx5_xstats_get_names,
- .fw_version_get = mlx5_fw_version_get,
- .dev_infos_get = mlx5_dev_infos_get,
- .read_clock = mlx5_read_clock,
- .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
- .vlan_filter_set = mlx5_vlan_filter_set,
- .rx_queue_setup = mlx5_rx_queue_setup,
- .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
- .tx_queue_setup = mlx5_tx_queue_setup,
- .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
- .rx_queue_release = mlx5_rx_queue_release,
- .tx_queue_release = mlx5_tx_queue_release,
- .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
- .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
- .mac_addr_remove = mlx5_mac_addr_remove,
- .mac_addr_add = mlx5_mac_addr_add,
- .mac_addr_set = mlx5_mac_addr_set,
- .set_mc_addr_list = mlx5_set_mc_addr_list,
- .mtu_set = mlx5_dev_set_mtu,
- .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
- .vlan_offload_set = mlx5_vlan_offload_set,
- .reta_update = mlx5_dev_rss_reta_update,
- .reta_query = mlx5_dev_rss_reta_query,
- .rss_hash_update = mlx5_rss_hash_update,
- .rss_hash_conf_get = mlx5_rss_hash_conf_get,
- .filter_ctrl = mlx5_dev_filter_ctrl,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
- .rxq_info_get = mlx5_rxq_info_get,
- .txq_info_get = mlx5_txq_info_get,
- .rx_burst_mode_get = mlx5_rx_burst_mode_get,
- .tx_burst_mode_get = mlx5_tx_burst_mode_get,
- .rx_queue_count = mlx5_rx_queue_count,
- .rx_queue_intr_enable = mlx5_rx_intr_enable,
- .rx_queue_intr_disable = mlx5_rx_intr_disable,
- .is_removed = mlx5_is_removed,
- .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
- .get_module_info = mlx5_get_module_info,
- .get_module_eeprom = mlx5_get_module_eeprom,
- .hairpin_cap_get = mlx5_hairpin_cap_get,
- .mtr_ops_get = mlx5_flow_meter_ops_get,
-};
-
-/* Available operations from secondary process. */
-const struct eth_dev_ops mlx5_dev_sec_ops = {
- .stats_get = mlx5_stats_get,
- .stats_reset = mlx5_stats_reset,
- .xstats_get = mlx5_xstats_get,
- .xstats_reset = mlx5_xstats_reset,
- .xstats_get_names = mlx5_xstats_get_names,
- .fw_version_get = mlx5_fw_version_get,
- .dev_infos_get = mlx5_dev_infos_get,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
- .rxq_info_get = mlx5_rxq_info_get,
- .txq_info_get = mlx5_txq_info_get,
- .rx_burst_mode_get = mlx5_rx_burst_mode_get,
- .tx_burst_mode_get = mlx5_tx_burst_mode_get,
- .get_module_info = mlx5_get_module_info,
- .get_module_eeprom = mlx5_get_module_eeprom,
-};
-
-/* Available operations in flow isolated mode. */
-const struct eth_dev_ops mlx5_dev_ops_isolate = {
- .dev_configure = mlx5_dev_configure,
- .dev_start = mlx5_dev_start,
- .dev_stop = mlx5_dev_stop,
- .dev_set_link_down = mlx5_set_link_down,
- .dev_set_link_up = mlx5_set_link_up,
- .dev_close = mlx5_dev_close,
- .promiscuous_enable = mlx5_promiscuous_enable,
- .promiscuous_disable = mlx5_promiscuous_disable,
- .allmulticast_enable = mlx5_allmulticast_enable,
- .allmulticast_disable = mlx5_allmulticast_disable,
- .link_update = mlx5_link_update,
- .stats_get = mlx5_stats_get,
- .stats_reset = mlx5_stats_reset,
- .xstats_get = mlx5_xstats_get,
- .xstats_reset = mlx5_xstats_reset,
- .xstats_get_names = mlx5_xstats_get_names,
- .fw_version_get = mlx5_fw_version_get,
- .dev_infos_get = mlx5_dev_infos_get,
- .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
- .vlan_filter_set = mlx5_vlan_filter_set,
- .rx_queue_setup = mlx5_rx_queue_setup,
- .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
- .tx_queue_setup = mlx5_tx_queue_setup,
- .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
- .rx_queue_release = mlx5_rx_queue_release,
- .tx_queue_release = mlx5_tx_queue_release,
- .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
- .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
- .mac_addr_remove = mlx5_mac_addr_remove,
- .mac_addr_add = mlx5_mac_addr_add,
- .mac_addr_set = mlx5_mac_addr_set,
- .set_mc_addr_list = mlx5_set_mc_addr_list,
- .mtu_set = mlx5_dev_set_mtu,
- .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
- .vlan_offload_set = mlx5_vlan_offload_set,
- .filter_ctrl = mlx5_dev_filter_ctrl,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
- .rxq_info_get = mlx5_rxq_info_get,
- .txq_info_get = mlx5_txq_info_get,
- .rx_burst_mode_get = mlx5_rx_burst_mode_get,
- .tx_burst_mode_get = mlx5_tx_burst_mode_get,
- .rx_queue_intr_enable = mlx5_rx_intr_enable,
- .rx_queue_intr_disable = mlx5_rx_intr_disable,
- .is_removed = mlx5_is_removed,
- .get_module_info = mlx5_get_module_info,
- .get_module_eeprom = mlx5_get_module_eeprom,
- .hairpin_cap_get = mlx5_hairpin_cap_get,
- .mtr_ops_get = mlx5_flow_meter_ops_get,
-};
-
/**
* Verify and store value for device argument.
*
mlx5_args_check(const char *key, const char *val, void *opaque)
{
struct mlx5_dev_config *config = opaque;
- unsigned long tmp;
+ unsigned long mod;
+ signed long tmp;
/* No-op, port representors are processed in mlx5_dev_spawn(). */
if (!strcmp(MLX5_REPRESENTOR, key))
return 0;
errno = 0;
- tmp = strtoul(val, NULL, 0);
+ tmp = strtol(val, NULL, 0);
if (errno) {
rte_errno = errno;
DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
return -rte_errno;
}
+ if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
+ /* Negative values are acceptable for some keys only. */
+ rte_errno = EINVAL;
+ DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
+ return -rte_errno;
+ }
+ mod = tmp >= 0 ? tmp : -tmp;
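+ /*
+ * Worked example (hypothetical devarg): "tx_skew=-20" yields
+ * tmp == -20 and mod == 20; the sign is preserved in the stored
+ * config value while mod feeds the non-zero check for "tx_pp".
+ */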
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
config->cqe_comp = !!tmp;
} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
config->txq_inline_mpw = tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
+ } else if (strcmp(MLX5_TX_PP, key) == 0) {
+ if (!mod) {
+ DRV_LOG(ERR, "Zero Tx packet pacing parameter");
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ config->tx_pp = tmp;
+ } else if (strcmp(MLX5_TX_SKEW, key) == 0) {
+ config->tx_skew = tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
config->rx_vec_en = !!tmp;
} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
return -rte_errno;
}
config->dv_xmeta_en = tmp;
+ } else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
+ config->lacp_by_user = !!tmp;
} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
config->mr_ext_memseg_en = !!tmp;
} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
return -rte_errno;
}
config->reclaim_mode = tmp;
+ } else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
+ config->sys_mem_en = !!tmp;
+ } else if (strcmp(MLX5_DECAP_EN, key) == 0) {
+ config->decap_en = !!tmp;
} else {
DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
MLX5_TX_DB_NC,
+ MLX5_TX_PP,
+ MLX5_TX_SKEW,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
MLX5_L3_VXLAN_EN,
MLX5_DV_ESW_EN,
MLX5_DV_FLOW_EN,
MLX5_DV_XMETA_EN,
+ MLX5_LACP_BY_USER,
MLX5_MR_EXT_MEMSEG_EN,
MLX5_REPRESENTOR,
MLX5_MAX_DUMP_FILES_NUM,
MLX5_CLASS_ARG_NAME,
MLX5_HP_BUF_SIZE,
MLX5_RECLAIM_MEM,
+ MLX5_SYS_MEM_EN,
+ MLX5_DECAP_EN,
NULL,
};
struct rte_kvargs *kvlist;
return 0;
}
-/**
- * PMD global initialization.
- *
- * Independent from individual device, this function initializes global
- * per-PMD data structures distinguishing primary and secondary processes.
- * Hence, each initialization is called once per a process.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_init_once(void)
-{
- struct mlx5_shared_data *sd;
- struct mlx5_local_data *ld = &mlx5_local_data;
- int ret = 0;
-
- if (mlx5_init_shared_data())
- return -rte_errno;
- sd = mlx5_shared_data;
- MLX5_ASSERT(sd);
- rte_spinlock_lock(&sd->lock);
- switch (rte_eal_process_type()) {
- case RTE_PROC_PRIMARY:
- if (sd->init_done)
- break;
- LIST_INIT(&sd->mem_event_cb_list);
- rte_rwlock_init(&sd->mem_event_rwlock);
- rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
- mlx5_mr_mem_event_cb, NULL);
- ret = mlx5_mp_init_primary(MLX5_MP_NAME,
- mlx5_mp_primary_handle);
- if (ret)
- goto out;
- sd->init_done = true;
- break;
- case RTE_PROC_SECONDARY:
- if (ld->init_done)
- break;
- ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
- mlx5_mp_secondary_handle);
- if (ret)
- goto out;
- ++sd->secondary_cnt;
- ld->init_done = true;
- break;
- default:
- break;
- }
-out:
- rte_spinlock_unlock(&sd->lock);
- return ret;
-}
-
/**
* Configures the minimal amount of data to inline into WQE
* while sending packets.
DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
}
-/**
- * Allocate page of door-bells and register it using DevX API.
- *
- * @param [in] dev
- * Pointer to Ethernet device.
- *
- * @return
- * Pointer to new page on success, NULL otherwise.
- */
-static struct mlx5_devx_dbr_page *
-mlx5_alloc_dbr_page(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_devx_dbr_page *page;
-
- /* Allocate space for door-bell page and management data. */
- page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page),
- RTE_CACHE_LINE_SIZE, dev->device->numa_node);
- if (!page) {
- DRV_LOG(ERR, "port %u cannot allocate dbr page",
- dev->data->port_id);
- return NULL;
- }
- /* Register allocated memory. */
- page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs,
- MLX5_DBR_PAGE_SIZE, 0);
- if (!page->umem) {
- DRV_LOG(ERR, "port %u cannot umem reg dbr page",
- dev->data->port_id);
- rte_free(page);
- return NULL;
- }
- return page;
-}
-
-/**
- * Find the next available door-bell, allocate new page if needed.
- *
- * @param [in] dev
- * Pointer to Ethernet device.
- * @param [out] dbr_page
- * Door-bell page containing the page data.
- *
- * @return
- * Door-bell address offset on success, a negative error value otherwise.
- */
-int64_t
-mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_devx_dbr_page *page = NULL;
- uint32_t i, j;
-
- LIST_FOREACH(page, &priv->dbrpgs, next)
- if (page->dbr_count < MLX5_DBR_PER_PAGE)
- break;
- if (!page) { /* No page with free door-bell exists. */
- page = mlx5_alloc_dbr_page(dev);
- if (!page) /* Failed to allocate new page. */
- return (-1);
- LIST_INSERT_HEAD(&priv->dbrpgs, page, next);
- }
- /* Loop to find bitmap part with clear bit. */
- for (i = 0;
- i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
- i++)
- ; /* Empty. */
- /* Find the first clear bit. */
- MLX5_ASSERT(i < MLX5_DBR_BITMAP_SIZE);
- j = rte_bsf64(~page->dbr_bitmap[i]);
- page->dbr_bitmap[i] |= (UINT64_C(1) << j);
- page->dbr_count++;
- *dbr_page = page;
- return (((i * 64) + j) * sizeof(uint64_t));
-}
-
-/**
- * Release a door-bell record.
- *
- * @param [in] dev
- * Pointer to Ethernet device.
- * @param [in] umem_id
- * UMEM ID of page containing the door-bell record to release.
- * @param [in] offset
- * Offset of door-bell record in page.
- *
- * @return
- * 0 on success, a negative error value otherwise.
- */
-int32_t
-mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_devx_dbr_page *page = NULL;
- int ret = 0;
-
- LIST_FOREACH(page, &priv->dbrpgs, next)
- /* Find the page this address belongs to. */
- if (mlx5_os_get_umem_id(page->umem) == umem_id)
- break;
- if (!page)
- return -EINVAL;
- page->dbr_count--;
- if (!page->dbr_count) {
- /* Page not used, free it and remove from list. */
- LIST_REMOVE(page, next);
- if (page->umem)
- ret = -mlx5_glue->devx_umem_dereg(page->umem);
- rte_free(page);
- } else {
- /* Mark in bitmap that this door-bell is not in use. */
- offset /= MLX5_DBR_SIZE;
- int i = offset / 64;
- int j = offset % 64;
-
- page->dbr_bitmap[i] &= ~(UINT64_C(1) << j);
- }
- return ret;
-}
-
int
rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
{
static const char *const dynf_names[] = {
RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
- RTE_MBUF_DYNFLAG_METADATA_NAME
+ RTE_MBUF_DYNFLAG_METADATA_NAME,
+ RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
};
unsigned int i;
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
},
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
+ },
{
.vendor_id = 0
}
};
-struct rte_pci_driver mlx5_driver = {
- .driver = {
- .name = MLX5_DRIVER_NAME
+static struct mlx5_pci_driver mlx5_driver = {
+ .driver_class = MLX5_CLASS_NET,
+ .pci_driver = {
+ .driver = {
+ .name = MLX5_DRIVER_NAME,
+ },
+ .id_table = mlx5_pci_id_map,
+ .probe = mlx5_os_pci_probe,
+ .remove = mlx5_pci_remove,
+ .dma_map = mlx5_dma_map,
+ .dma_unmap = mlx5_dma_unmap,
+ .drv_flags = PCI_DRV_FLAGS,
},
- .id_table = mlx5_pci_id_map,
- .probe = mlx5_os_pci_probe,
- .remove = mlx5_pci_remove,
- .dma_map = mlx5_dma_map,
- .dma_unmap = mlx5_dma_unmap,
- .drv_flags = PCI_DRV_FLAGS,
};
+/* Initialize driver log type. */
+RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)
+
/**
* Driver initialization routine.
*/
RTE_INIT(rte_mlx5_pmd_init)
{
- /* Initialize driver log type. */
- mlx5_logtype = rte_log_register("pmd.net.mlx5");
- if (mlx5_logtype >= 0)
- rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
-
+ mlx5_common_init();
/* Build the static tables for Verbs conversion. */
mlx5_set_ptype_table();
mlx5_set_cksum_table();
mlx5_set_swp_types_table();
if (mlx5_glue)
- rte_pci_register(&mlx5_driver);
+ mlx5_pci_driver_register(&mlx5_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);