Enabled by default.
+- ``mr_mempool_reg_en`` parameter [int]
+
+ A nonzero value enables implicit registration of DMA memory of all mempools
+ except those having ``MEMPOOL_F_NON_IO``. This flag is set automatically
+ for mempools populated with non-contiguous objects or those without IOVA.
+ The effect is that when a packet from a mempool is transmitted,
+ its memory is already registered for DMA in the PMD and no registration
+ will happen on the data path. The tradeoff is extra work on the creation
+ of each mempool and increased HW resource use if some mempools
+ are not used with MLX5 devices.
+
+ Enabled by default.
+
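+ For example, implicit registration can be disabled for a given device
+ through its device arguments; this is a minimal sketch and the PCI
+ address is illustrative::
+
+    dpdk-testpmd -a 0000:03:00.0,mr_mempool_reg_en=0 -- -i
+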
- ``representor`` parameter [list]
This parameter can be used to instantiate DPDK Ethernet devices from
* Added support for Inline IPsec for CN9K event mode and CN10K
poll mode and event mode.
+* **Updated Mellanox mlx5 driver.**
+
+ Updated the Mellanox mlx5 driver with new features and improvements, including:
+
+ * Added implicit mempool registration to avoid data path hiccups (opt-out).
+
* **Updated Marvell cnxk crypto PMD.**
* Added AES-CBC SHA1-HMAC support in lookaside protocol (IPsec) for CN10K.
#include "mlx5_tx.h"
#include "mlx5_utils.h"
+/**
+ * Handle a port-agnostic message.
+ *
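+ * @param mp_msg
+ *   Received message, carrying the request parameters.
+ * @param peer
+ *   Pointer to the peer handle, used for sending the reply.
+ *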
+ * @return
+ * 0 on success, 1 when message is not port-agnostic, (-1) on error.
+ */
+static int
+mlx5_mp_os_handle_port_agnostic(const struct rte_mp_msg *mp_msg,
+ const void *peer)
+{
+ struct rte_mp_msg mp_res;
+ struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param;
+ const struct mlx5_mp_param *param =
+ (const struct mlx5_mp_param *)mp_msg->param;
+ const struct mlx5_mp_arg_mempool_reg *mpr;
+ struct mlx5_mp_id mp_id;
+
+ switch (param->type) {
+ case MLX5_MP_REQ_MEMPOOL_REGISTER:
+ mlx5_mp_id_init(&mp_id, param->port_id);
+ mp_init_msg(&mp_id, &mp_res, param->type);
+ mpr = &param->args.mempool_reg;
+ res->result = mlx5_mr_mempool_register(mpr->share_cache,
+ mpr->pd, mpr->mempool,
+ NULL);
+ return rte_mp_reply(&mp_res, peer);
+ case MLX5_MP_REQ_MEMPOOL_UNREGISTER:
+ mlx5_mp_id_init(&mp_id, param->port_id);
+ mp_init_msg(&mp_id, &mp_res, param->type);
+ mpr = &param->args.mempool_reg;
+ res->result = mlx5_mr_mempool_unregister(mpr->share_cache,
+ mpr->mempool, NULL);
+ return rte_mp_reply(&mp_res, peer);
+ default:
+ return 1;
+ }
+ return -1;
+}
+
int
mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
int ret;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ /* Port-agnostic messages. */
+ ret = mlx5_mp_os_handle_port_agnostic(mp_msg, peer);
+ if (ret <= 0)
+ return ret;
+ /* Port-specific messages. */
if (!rte_eth_dev_is_valid_port(param->port_id)) {
rte_errno = ENODEV;
DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
err = mlx5_proc_priv_init(eth_dev);
if (err)
return NULL;
- mp_id.port_id = eth_dev->data->port_id;
- strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
+ mlx5_mp_id_init(&mp_id, eth_dev->data->port_id);
/* Receive command fd from primary process */
err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
if (err < 0)
config->txqs_inline = MLX5_ARG_UNSET;
config->vf_nl_en = 1;
config->mr_ext_memseg_en = 1;
+ config->mr_mempool_reg_en = 1;
config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
config->dv_esw_en = 1;
/* Device parameter to configure allow or prevent duplicate rules pattern. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"
+/* Device parameter to configure implicit registration of mempool memory. */
+#define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
+
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;
return err;
}
+/**
+ * Unregister the mempool from the protection domain.
+ *
+ * @param sh
+ * Pointer to the device shared context.
+ * @param mp
+ * Mempool being unregistered.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mp_id mp_id;
+
+ mlx5_mp_id_init(&mp_id, 0);
+ if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
+ DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
+ mp->name, sh->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to register mempools
+ * for the protection domain.
+ *
+ * @param mp
+ * The mempool being walked.
+ * @param arg
+ * Pointer to the device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_dev_ctx_shared *sh = arg;
+ struct mlx5_mp_id mp_id;
+ int ret;
+
+ mlx5_mp_id_init(&mp_id, 0);
+ ret = mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp, &mp_id);
+ if (ret < 0 && rte_errno != EEXIST)
+ DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
+ mp->name, sh->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to unregister mempools
+ * from the protection domain.
+ *
+ * @param mp
+ * The mempool being walked.
+ * @param arg
+ * Pointer to the device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
+{
+ mlx5_dev_ctx_shared_mempool_unregister
+ ((struct mlx5_dev_ctx_shared *)arg, mp);
+}
+
+/**
+ * Mempool life cycle callback for Ethernet devices.
+ *
+ * @param event
+ * Mempool life cycle event.
+ * @param mp
+ * Associated mempool.
+ * @param arg
+ * Pointer to a device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
+ struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_dev_ctx_shared *sh = arg;
+ struct mlx5_mp_id mp_id;
+
+ switch (event) {
+ case RTE_MEMPOOL_EVENT_READY:
+ mlx5_mp_id_init(&mp_id, 0);
+ if (mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp,
+ &mp_id) < 0)
+ DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
+ mp->name, sh->pd, rte_strerror(rte_errno));
+ break;
+ case RTE_MEMPOOL_EVENT_DESTROY:
+ mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+ break;
+ }
+}
+
+/**
+ * Callback used when implicit mempool registration is disabled
+ * in order to track Rx mempool destruction.
+ *
+ * @param event
+ * Mempool life cycle event.
+ * @param mp
+ * An Rx mempool registered explicitly when the port is started.
+ * @param arg
+ * Pointer to a device shared context.
+ */
+static void
+mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
+ struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_dev_ctx_shared *sh = arg;
+
+ if (event == RTE_MEMPOOL_EVENT_DESTROY)
+ mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+}
+
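+/**
+ * Subscribe to mempool life cycle events for the shared context
+ * of the given Ethernet device.
+ * When implicit registration is disabled, only Rx mempool destruction
+ * is tracked; otherwise all existing mempools are registered on the
+ * first call for this shared context and new ones via the callback.
+ *
+ * @param dev
+ *   Pointer to the Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative value otherwise and rte_errno is set.
+ */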
+int
+mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ int ret;
+
+ /* Check if we only need to track Rx mempool destruction. */
+ if (!priv->config.mr_mempool_reg_en) {
+ ret = rte_mempool_event_callback_register
+ (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
+ return ret == 0 || rte_errno == EEXIST ? 0 : ret;
+ }
+ /* Callback for this shared context may already be registered. */
+ ret = rte_mempool_event_callback_register
+ (mlx5_dev_ctx_shared_mempool_event_cb, sh);
+ if (ret != 0 && rte_errno != EEXIST)
+ return ret;
+ /* Register mempools only once for this shared context. */
+ if (ret == 0)
+ rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
+ return 0;
+}
+
/**
* Allocate shared device context. If there is multiport device the
* master and representors will share this context, if there is single
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
+ int ret;
+
pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (--sh->refcnt)
goto exit;
+ /* Stop watching for mempool events and unregister all mempools. */
+ ret = rte_mempool_event_callback_unregister
+ (mlx5_dev_ctx_shared_mempool_event_cb, sh);
+ if (ret < 0 && rte_errno == ENOENT)
+ ret = rte_mempool_event_callback_unregister
+ (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
+ if (ret == 0)
+ rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
+ sh);
/* Remove from memory callback device list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
LIST_REMOVE(sh, mem_event_cb);
config->decap_en = !!tmp;
} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
config->allow_duplicate_pattern = !!tmp;
+ } else if (strcmp(MLX5_MR_MEMPOOL_REG_EN, key) == 0) {
+ config->mr_mempool_reg_en = !!tmp;
} else {
DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
MLX5_SYS_MEM_EN,
MLX5_DECAP_EN,
MLX5_ALLOW_DUPLICATE_PATTERN,
+ MLX5_MR_MEMPOOL_REG_EN,
NULL,
};
struct rte_kvargs *kvlist;
/** Key string for IPC. */
#define MLX5_MP_NAME "net_mlx5_mp"
+/** Initialize a multi-process ID. */
+static inline void
+mlx5_mp_id_init(struct mlx5_mp_id *mp_id, uint16_t port_id)
+{
+ mp_id->port_id = port_id;
+ strlcpy(mp_id->name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
+}
LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);
unsigned int dv_miss_info:1; /* restore packet after partial hw miss */
unsigned int allow_duplicate_pattern:1;
/* Allow/Prevent the duplicate rules pattern. */
+ unsigned int mr_mempool_reg_en:1;
+ /* Allow/prevent implicit mempool memory registration. */
struct {
unsigned int enabled:1; /* Whether MPRQ is enabled. */
unsigned int stride_num_n; /* Number of strides. */
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
const struct mlx5_dev_config *config);
void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
+int mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev);
void mlx5_free_table_hash_list(struct mlx5_priv *priv);
int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
}
}
-/**
- * Bottom-half of LKey search on Rx.
- *
- * @param rxq
- * Pointer to Rx queue structure.
- * @param addr
- * Search key.
- *
- * @return
- * Searched LKey on success, UINT32_MAX on no match.
- */
-uint32_t
-mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
-{
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
- struct mlx5_priv *priv = rxq_ctrl->priv;
-
- return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
- &priv->sh->share_cache, mr_ctrl, addr,
- priv->config.mr_ext_memseg_en);
-}
-
/**
* Bottom-half of LKey search on Tx.
*
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct mlx5_priv *priv = txq_ctrl->priv;
uintptr_t addr = (uintptr_t)mb->buf_addr;
uint32_t lkey;
+ if (priv->config.mr_mempool_reg_en) {
+ struct rte_mempool *mp = NULL;
+ struct mlx5_mprq_buf *buf;
+
+ if (!RTE_MBUF_HAS_EXTBUF(mb)) {
+ mp = mlx5_mb2mp(mb);
+ } else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
+ /* Recover MPRQ mempool. */
+ buf = mb->shinfo->fcb_opaque;
+ mp = buf->mp;
+ }
+ if (mp != NULL) {
+ lkey = mlx5_mr_mempool2mr_bh(&priv->sh->share_cache,
+ mr_ctrl, mp, addr);
+ /*
+ * Lookup can only fail on invalid input, e.g. "addr"
+ * is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
+ */
+ if (lkey != UINT32_MAX)
+ return lkey;
+ }
+ /* Fall back to the generic mechanism in corner cases. */
+ }
lkey = mlx5_tx_addr2mr_bh(txq, addr);
if (lkey == UINT32_MAX && rte_errno == ENXIO) {
/* Mempool may have externally allocated memory. */
mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
return mlx5_tx_addr2mr_bh(txq, addr);
}
-
-/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
-static void
-mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
- struct rte_mempool_memhdr *memhdr,
- unsigned mem_idx __rte_unused)
-{
- struct mr_update_mp_data *data = opaque;
- struct rte_eth_dev *dev = data->dev;
- struct mlx5_priv *priv = dev->data->dev_private;
-
- uint32_t lkey;
-
- /* Stop iteration if failed in the previous walk. */
- if (data->ret < 0)
- return;
- /* Register address of the chunk and update local caches. */
- lkey = mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
- &priv->sh->share_cache, data->mr_ctrl,
- (uintptr_t)memhdr->addr,
- priv->config.mr_ext_memseg_en);
- if (lkey == UINT32_MAX)
- data->ret = -1;
-}
-
-/**
- * Register entire memory chunks in a Mempool.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param mr_ctrl
- * Pointer to per-queue MR control structure.
- * @param mp
- * Pointer to registering Mempool.
- *
- * @return
- * 0 on success, -1 on failure.
- */
-int
-mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
- struct rte_mempool *mp)
-{
- struct mr_update_mp_data data = {
- .dev = dev,
- .mr_ctrl = mr_ctrl,
- .ret = 0,
- };
- uint32_t flags = rte_pktmbuf_priv_flags(mp);
-
- if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
- /*
- * The pinned external buffer should be registered for DMA
- * operations by application. The mem_list of the pool contains
- * the list of chunks with mbuf structures w/o built-in data
- * buffers and DMA actually does not happen there, no need
- * to create MR for these chunks.
- */
- return 0;
- }
- DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
- "having %u chunks.", dev->data->port_id,
- mp->name, mp->nb_mem_chunks);
- rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
- if (data.ret < 0 && rte_errno == ENXIO) {
- /* Mempool may have externally allocated memory. */
- return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
- }
- return data.ret;
-}
void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg);
-int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
- struct rte_mempool *mp);
#endif /* RTE_PMD_MLX5_MR_H_ */
uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n);
-/* mlx5_mr.c */
-
-uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
+static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
/**
- * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
- * as mempool is pre-configured and static.
+ * Query LKey from a packet buffer for Rx. No need to flush local caches
+ * as the Rx mempool database entries are valid for the lifetime of the queue.
*
* @param rxq
* Pointer to Rx queue structure.
*
* @return
* Searched LKey on success, UINT32_MAX on no match.
+ * This function always succeeds on valid input.
*/
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct rte_mempool *mp;
uint32_t lkey;
/* Linear search on MR cache array. */
MLX5_MR_CACHE_N, addr);
if (likely(lkey != UINT32_MAX))
return lkey;
- /* Take slower bottom-half (Binary Search) on miss. */
- return mlx5_rx_addr2mr_bh(rxq, addr);
+ /*
+ * Slower search in the mempool database on miss.
+ * During queue creation rxq->sh is not yet set, so we use rxq_ctrl.
+ */
+ rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
+ return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->share_cache,
+ mr_ctrl, mp, addr);
}
#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
unsigned int strd_sz_n = 0;
unsigned int i;
unsigned int n_ibv = 0;
+ int ret;
if (!mlx5_mprq_enabled(dev))
return 0;
rte_errno = ENOMEM;
return -rte_errno;
}
+ ret = mlx5_mr_mempool_register(&priv->sh->share_cache, priv->sh->pd,
+ mp, &priv->mp_id);
+ if (ret < 0 && rte_errno != EEXIST) {
+ ret = rte_errno;
+ DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
+ dev->data->port_id);
+ rte_mempool_free(mp);
+ rte_errno = ret;
+ return -rte_errno;
+ }
priv->mprq_mp = mp;
exit:
/* Set mempool for each Rx queue. */
/* rte_errno is already set. */
goto error;
}
+ /* Rx queues don't use this pointer, but we want a valid structure. */
+ tmpl->rxq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
return -rte_errno;
}
+/**
+ * Translate the chunk address to an MR key in order to put it into the cache.
+ */
+static void
+mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned int idx)
+{
+ struct mlx5_rxq_data *rxq = opaque;
+
+ RTE_SET_USED(mp);
+ RTE_SET_USED(idx);
+ mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
+}
+
+/**
+ * Register Rx queue mempools and fill the Rx queue cache.
+ * This function tolerates repeated mempool registration.
+ *
+ * @param[in] rxq_ctrl
+ * Rx queue control data.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+static int
+mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_priv *priv = rxq_ctrl->priv;
+ struct rte_mempool *mp;
+ uint32_t s;
+ int ret = 0;
+
+ mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
+ /* MPRQ mempool is registered on creation, just fill the cache. */
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+ rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
+ mlx5_rxq_mempool_register_cb,
+ &rxq_ctrl->rxq);
+ return 0;
+ }
+ for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+ mp = rxq_ctrl->rxq.rxseg[s].mp;
+ ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+ priv->sh->pd, mp, &priv->mp_id);
+ if (ret < 0 && rte_errno != EEXIST)
+ return ret;
+ rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
+ &rxq_ctrl->rxq);
+ }
+ return 0;
+}
+
/**
* Stop traffic on Rx queues.
*
if (!rxq_ctrl)
continue;
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- /* Pre-register Rx mempools. */
- if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
- mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
- rxq_ctrl->rxq.mprq_mp);
- } else {
- uint32_t s;
-
- for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
- mlx5_mr_update_mp
- (dev, &rxq_ctrl->rxq.mr_ctrl,
- rxq_ctrl->rxq.rxseg[s].mp);
- }
+ /*
+ * Pre-register the mempools. Regardless of whether
+ * the implicit registration is enabled or not,
+ * Rx mempool destruction is tracked to free MRs.
+ */
+ if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+ goto error;
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
goto error;
dev->data->port_id, strerror(rte_errno));
goto error;
}
+ if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) {
+ DRV_LOG(ERR, "port %u failed to subscribe for mempool life cycle: %s",
+ dev->data->port_id, rte_strerror(rte_errno));
+ goto error;
+ }
rte_wmb();
dev->tx_pkt_burst = mlx5_select_tx_function(dev);
dev->rx_pkt_burst = mlx5_select_rx_function(dev);
dev_config.txqs_inline = MLX5_ARG_UNSET;
dev_config.vf_nl_en = 0;
dev_config.mr_ext_memseg_en = 1;
+ dev_config.mr_mempool_reg_en = 1;
dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
dev_config.dv_esw_en = 0;