#include "mlx5_common.h"
#include "mlx5_common_os.h"
+#include "mlx5_common_mp.h"
#include "mlx5_common_log.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_private.h"
#endif
}
+/**
+ * Register the mempool for the protection domain.
+ *
+ * @param cdev
+ * Pointer to the mlx5 common device.
+ * @param mp
+ * Mempool being registered.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+static int
+mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mp_id mp_id;
+
+ mlx5_mp_id_init(&mp_id, 0);
+ return mlx5_mr_mempool_register(&cdev->mr_scache, cdev->pd, mp, &mp_id);
+}
+
+/**
+ * Unregister the mempool from the protection domain.
+ *
+ * @param cdev
+ * Pointer to the mlx5 common device.
+ * @param mp
+ * Mempool being unregistered.
+ */
+void
+mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mp_id mp_id;
+
+ mlx5_mp_id_init(&mp_id, 0);
+ if (mlx5_mr_mempool_unregister(&cdev->mr_scache, mp, &mp_id) < 0)
+ DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
+ mp->name, cdev->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to register mempools for the protection domain.
+ *
+ * @param mp
+ * The mempool being walked.
+ * @param arg
+ * Pointer to the device shared context.
+ */
+static void
+mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)
+{
+ struct mlx5_common_device *cdev = arg;
+ int ret;
+
+ ret = mlx5_dev_mempool_register(cdev, mp);
+ if (ret < 0 && rte_errno != EEXIST)
+ DRV_LOG(ERR,
+ "Failed to register existing mempool %s for PD %p: %s",
+ mp->name, cdev->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to unregister mempools
+ * from the protection domain.
+ *
+ * @param mp
+ * The mempool being walked.
+ * @param arg
+ * Pointer to the device shared context.
+ */
+static void
+mlx5_dev_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
+{
+ mlx5_dev_mempool_unregister((struct mlx5_common_device *)arg, mp);
+}
+
+/**
+ * Mempool life cycle callback for mlx5 common devices.
+ *
+ * @param event
+ * Mempool life cycle event.
+ * @param mp
+ * Associated mempool.
+ * @param arg
+ * Pointer to a device shared context.
+ */
+static void
+mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
+ void *arg)
+{
+ struct mlx5_common_device *cdev = arg;
+
+ switch (event) {
+ case RTE_MEMPOOL_EVENT_READY:
+ if (mlx5_dev_mempool_register(cdev, mp) < 0)
+ DRV_LOG(ERR,
+ "Failed to register new mempool %s for PD %p: %s",
+ mp->name, cdev->pd, rte_strerror(rte_errno));
+ break;
+ case RTE_MEMPOOL_EVENT_DESTROY:
+ mlx5_dev_mempool_unregister(cdev, mp);
+ break;
+ }
+}
+
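+/**
+ * Subscribe to the mempool life cycle events for the device,
+ * if implicit mempool registration is enabled,
+ * and register all existing mempools once per device.
+ *
+ * @param cdev
+ * Pointer to the mlx5 common device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */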
+int
+mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
+{
+ int ret = 0;
+
+ if (!cdev->config.mr_mempool_reg_en)
+ return 0;
+ rte_rwlock_write_lock(&cdev->mr_scache.mprwlock);
+ if (cdev->mr_scache.mp_cb_registered)
+ goto exit;
+ /* Callback for this device may already be registered. */
+ ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb,
+ cdev);
+ if (ret != 0 && rte_errno != EEXIST)
+ goto exit;
+ /* Register mempools only once for this device. */
+ if (ret == 0)
+ rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev);
+ ret = 0;
+ cdev->mr_scache.mp_cb_registered = 1;
+exit:
+ rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock);
+ return ret;
+}
+
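+/**
+ * Unsubscribe from the mempool life cycle events for the device
+ * and unregister all mempools from its protection domain.
+ *
+ * @param cdev
+ * Pointer to the mlx5 common device.
+ */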
+static void
+mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev)
+{
+ int ret;
+
+ if (!cdev->mr_scache.mp_cb_registered ||
+ !cdev->config.mr_mempool_reg_en)
+ return;
+ /* Stop watching for mempool events and unregister all mempools. */
+ ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb,
+ cdev);
+ if (ret == 0)
+ rte_mempool_walk(mlx5_dev_mempool_unregister_cb, cdev);
+}
+
/**
* Callback for memory event.
*
if (TAILQ_EMPTY(&devices_list))
rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
NULL);
+ mlx5_dev_mempool_unsubscribe(cdev);
mlx5_mr_release_cache(&cdev->mr_scache);
mlx5_dev_hw_global_release(cdev);
}
bool
mlx5_dev_is_pci(const struct rte_device *dev);
+__rte_internal
+int
+mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev);
+
+__rte_internal
+void
+mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp);
+
/* mlx5_common_mr.c */
__rte_internal
uint16_t port_id;
};
+/** Key string for IPC. */
+#define MLX5_MP_NAME "common_mlx5_mp"
+
+/** Initialize a multi-process ID. */
+static inline void
+mlx5_mp_id_init(struct mlx5_mp_id *mp_id, uint16_t port_id)
+{
+ mp_id->port_id = port_id;
+ strlcpy(mp_id->name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
+}
+
/** Request timeout for IPC. */
#define MLX5_MP_REQ_TIMEOUT_SEC 5
#include <rte_rwlock.h>
#include "mlx5_glue.h"
+#include "mlx5_common.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
+#include "mlx5_common_os.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
unsigned int mrs_n;
};
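+/**
+ * Shared-info free callback for mbufs attached to Multi-Packet RQ
+ * buffer strides.
+ *
+ * @param addr
+ * Unused buffer address.
+ * @param opaque
+ * Pointer to the owning MPRQ buffer.
+ */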
+void
+mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
+{
+ struct mlx5_mprq_buf *buf = opaque;
+
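+ /*
+ * refcnt == 1 means this is the last reference: the buffer can be
+ * returned to its mempool right away. Otherwise drop one reference;
+ * the thread that brings the count down to zero resets it to the
+ * initial value of 1 and returns the buffer to the mempool for reuse.
+ */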
+ if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ rte_mempool_put(buf->mp, buf);
+ } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
+ __ATOMIC_RELAXED) == 0)) {
+ __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_mempool_put(buf->mp, buf);
+ }
+}
+
/**
* Expand B-tree table to a given size. Can't be called with holding
* memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
{
int ret;
+ if (mp_id == NULL) {
+ rte_errno = EINVAL;
+ return UINT32_MAX;
+ }
DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
mp_id->port_id, (void *)addr);
ret = mlx5_mp_req_mr_create(mp_id, addr);
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
-uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
- uintptr_t addr, unsigned int mr_ext_memseg_en)
+static uint32_t
+mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr,
+ unsigned int mr_ext_memseg_en)
{
uint32_t lkey;
uint16_t bh_idx = 0;
}
/**
- * Release all the created MRs and resources on global MR cache of a device.
+ * Release all the created MRs and resources on global MR cache of a device
* list.
*
* @param share_cache
mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
&share_cache->dereg_mr_cb);
rte_rwlock_init(&share_cache->rwlock);
+ rte_rwlock_init(&share_cache->mprwlock);
+ share_cache->mp_cb_registered = 0;
/* Initialize B-tree and allocate memory for global MR cache table. */
return mlx5_mr_btree_init(&share_cache->cache,
MLX5_MR_BTREE_CACHE_N * 2, socket);
/**
* Dump all the created MRs and the global cache entries.
*
- * @param sh
- * Pointer to Ethernet device shared context.
+ * @param share_cache
+ * Pointer to a global shared MR cache.
*/
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
mpr = mlx5_mempool_reg_lookup(share_cache, mp);
if (mpr == NULL) {
mlx5_mempool_reg_attach(new_mpr);
- LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
- new_mpr, next);
+ LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
ret = 0;
}
rte_rwlock_write_unlock(&share_cache->rwlock);
return lkey;
}
+/**
+ * Bottom-half of LKey search on mbuf. If supported, look up the address in
+ * the mempool registrations. Otherwise, fall back to the legacy search in
+ * the global MR caches.
+ *
+ * @param cdev
+ * Pointer to mlx5 device.
+ * @param mp_id
+ * Multi-process identifier, may be NULL for the primary process.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mb
+ * Pointer to mbuf.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx5_mr_mb2mr_bh(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
+ struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
+{
+ uint32_t lkey;
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+
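+ /*
+ * With implicit mempool registration enabled, try to resolve the
+ * mempool the buffer belongs to and use its registration directly.
+ */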
+ if (cdev->config.mr_mempool_reg_en) {
+ struct rte_mempool *mp = NULL;
+ struct mlx5_mprq_buf *buf;
+
+ if (!RTE_MBUF_HAS_EXTBUF(mb)) {
+ mp = mlx5_mb2mp(mb);
+ } else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
+ /* Recover MPRQ mempool. */
+ buf = mb->shinfo->fcb_opaque;
+ mp = buf->mp;
+ }
+ if (mp != NULL) {
+ lkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,
+ mr_ctrl, mp, addr);
+ /*
+ * Lookup can only fail on invalid input, e.g. "addr"
+ * is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
+ */
+ if (lkey != UINT32_MAX)
+ return lkey;
+ }
+ /* Fall back to the generic mechanism in corner cases. */
+ }
+ return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
+ addr, cdev->config.mr_ext_memseg_en);
+}
+
/**
* Query LKey from a packet buffer.
*
struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
{
uint32_t lkey;
- uintptr_t addr = (uintptr_t)mbuf->buf_addr;
/* Check generation bit to see if there's any change on existing MRs. */
if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
- addr, cdev->config.mr_ext_memseg_en);
+ return mlx5_mr_mb2mr_bh(cdev, mp_id, mr_ctrl, mbuf);
}
struct mlx5_mr_share_cache {
uint32_t dev_gen; /* Generation number to flush local caches. */
rte_rwlock_t rwlock; /* MR cache Lock. */
+ rte_rwlock_t mprwlock; /* Mempool registration lock. */
+ uint8_t mp_cb_registered; /* Whether mempool event callback is registered. */
struct mlx5_mr_btree cache; /* Global MR cache table. */
struct mlx5_mr_list mr_list; /* Registered MR list. */
struct mlx5_mr_list mr_free_list; /* Freed MR list. */
mlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func */
} __rte_packed;
+/* Multi-Packet RQ buffer header. */
+struct mlx5_mprq_buf {
+ struct rte_mempool *mp; /* Mempool the buffer was allocated from. */
+ uint16_t refcnt; /* Atomically accessed refcnt. */
+ uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
+ struct rte_mbuf_ext_shared_info shinfos[];
+ /*
+ * Shared information per stride.
+ * More memory will be allocated for the first stride head-room and for
+ * the strides data.
+ */
+} __rte_cache_aligned;
+
+__rte_internal
+void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
+
+/**
+ * Get the Memory Pool (MP) from an mbuf. If the mbuf is indirect, the pool
+ * of the direct mbuf holding its data is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static inline struct rte_mempool *
+mlx5_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_CLONED(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
+
/**
* Look up LKey from given lookup table by linear search. Firstly look up the
* last-hit entry. If miss, the entire array is searched. If found, update the
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_internal
-uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
- uintptr_t addr, unsigned int mr_ext_memseg_en);
-__rte_internal
uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
struct mlx5_mr_ctrl *mr_ctrl,
struct rte_mempool *mp, uintptr_t addr);
mlx5_common_verbs_dereg_mr; # WINDOWS_NO_EXPORT
mlx5_dev_is_pci;
+ mlx5_dev_mempool_subscribe;
+ mlx5_dev_mempool_unregister;
mlx5_devx_alloc_uar; # WINDOWS_NO_EXPORT
mlx5_mp_uninit_primary; # WINDOWS_NO_EXPORT
mlx5_mp_uninit_secondary; # WINDOWS_NO_EXPORT
- mlx5_mr_addr2mr_bh;
+ mlx5_mprq_buf_free_cb;
mlx5_mr_btree_free;
mlx5_mr_create_primary;
mlx5_mr_ctrl_init;
static int
mlx5_compress_dev_start(struct rte_compressdev *dev)
{
- RTE_SET_USED(dev);
- return 0;
+ struct mlx5_compress_priv *priv = dev->data->dev_private;
+
+ return mlx5_dev_mempool_subscribe(priv->cdev);
}
static void
static int
mlx5_crypto_dev_start(struct rte_cryptodev *dev)
{
- RTE_SET_USED(dev);
- return 0;
+ struct mlx5_crypto_priv *priv = dev->data->dev_private;
+
+ return mlx5_dev_mempool_subscribe(priv->cdev);
}
static int
switch (param->type) {
case MLX5_MP_REQ_CREATE_MR:
mp_init_msg(&priv->mp_id, &mp_res, param->type);
- lkey = mlx5_mr_create_primary(cdev->pd,
- &priv->sh->cdev->mr_scache,
+ lkey = mlx5_mr_create_primary(cdev->pd, &cdev->mr_scache,
&entry, param->args.addr,
cdev->config.mr_ext_memseg_en);
if (lkey == UINT32_MAX)
'mlx5_flow_dv.c',
'mlx5_flow_aso.c',
'mlx5_mac.c',
- 'mlx5_mr.c',
'mlx5_rss.c',
'mlx5_rx.c',
'mlx5_rxmode.c',
}
/**
- * Unregister the mempool from the protection domain.
- *
- * @param sh
- * Pointer to the device shared context.
- * @param mp
- * Mempool being unregistered.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
- struct rte_mempool *mp)
-{
- struct mlx5_mp_id mp_id;
-
- mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_unregister(&sh->cdev->mr_scache, mp, &mp_id) < 0)
- DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
-/**
- * rte_mempool_walk() callback to register mempools
- * for the protection domain.
+ * rte_mempool_walk() callback to unregister Rx mempools.
+ * It is used when implicit mempool registration is disabled.
*
* @param mp
* The mempool being walked.
* Pointer to the device shared context.
*/
static void
-mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
struct mlx5_dev_ctx_shared *sh = arg;
- struct mlx5_mp_id mp_id;
- int ret;
- mlx5_mp_id_init(&mp_id, 0);
- ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, mp,
- &mp_id);
- if (ret < 0 && rte_errno != EEXIST)
- DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
-/**
- * rte_mempool_walk() callback to unregister mempools
- * from the protection domain.
- *
- * @param mp
- * The mempool being walked.
- * @param arg
- * Pointer to the device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
-{
- mlx5_dev_ctx_shared_mempool_unregister
- ((struct mlx5_dev_ctx_shared *)arg, mp);
-}
-
-/**
- * Mempool life cycle callback for Ethernet devices.
- *
- * @param event
- * Mempool life cycle event.
- * @param mp
- * Associated mempool.
- * @param arg
- * Pointer to a device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
- struct rte_mempool *mp, void *arg)
-{
- struct mlx5_dev_ctx_shared *sh = arg;
- struct mlx5_mp_id mp_id;
-
- switch (event) {
- case RTE_MEMPOOL_EVENT_READY:
- mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd,
- mp, &mp_id) < 0)
- DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
- mp->name, sh->cdev->pd,
- rte_strerror(rte_errno));
- break;
- case RTE_MEMPOOL_EVENT_DESTROY:
- mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
- break;
- }
+ mlx5_dev_mempool_unregister(sh->cdev, mp);
}
/**
struct mlx5_dev_ctx_shared *sh = arg;
if (event == RTE_MEMPOOL_EVENT_DESTROY)
- mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+ mlx5_dev_mempool_unregister(sh->cdev, mp);
}
int
(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
return ret == 0 || rte_errno == EEXIST ? 0 : ret;
}
- /* Callback for this shared context may be already registered. */
- ret = rte_mempool_event_callback_register
- (mlx5_dev_ctx_shared_mempool_event_cb, sh);
- if (ret != 0 && rte_errno != EEXIST)
- return ret;
- /* Register mempools only once for this shared context. */
- if (ret == 0)
- rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
- return 0;
+ return mlx5_dev_mempool_subscribe(sh->cdev);
}
/**
if (--sh->refcnt)
goto exit;
/* Stop watching for mempool events and unregister all mempools. */
- ret = rte_mempool_event_callback_unregister
- (mlx5_dev_ctx_shared_mempool_event_cb, sh);
- if (ret < 0 && rte_errno == ENOENT)
+ if (!sh->cdev->config.mr_mempool_reg_en) {
ret = rte_mempool_event_callback_unregister
(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
- if (ret == 0)
- rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
- sh);
+ if (ret == 0)
+ rte_mempool_walk
+ (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
+ }
/* Remove context from the global device list. */
LIST_REMOVE(sh, next);
/* Release flow workspaces objects on the last device. */
int rc; /**< Return code. */
};
-/** Key string for IPC. */
-#define MLX5_MP_NAME "net_mlx5_mp"
-
-/** Initialize a multi-process ID. */
-static inline void
-mlx5_mp_id_init(struct mlx5_mp_id *mp_id, uint16_t port_id)
-{
- mp_id->port_id = port_id;
- strlcpy(mp_id->name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
-}
-
LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);
/* Shared data between primary and secondary processes. */
/* Global spinlock for primary and secondary processes. */
int init_done; /* Whether primary has done initialization. */
unsigned int secondary_cnt; /* Number of secondary processes init'd. */
- struct mlx5_dev_list mem_event_cb_list;
- rte_rwlock_t mem_event_rwlock;
};
/* Per-process data structure, not visible to other processes. */
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox Technologies, Ltd
- */
-
-#include <rte_eal_memconfig.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
-#include <rte_rwlock.h>
-
-#include <mlx5_common_mp.h>
-#include <mlx5_common_mr.h>
-
-#include "mlx5.h"
-#include "mlx5_rxtx.h"
-#include "mlx5_rx.h"
-#include "mlx5_tx.h"
-
-/**
- * Bottom-half of LKey search on Tx.
- *
- * @param txq
- * Pointer to Tx queue structure.
- * @param addr
- * Search key.
- *
- * @return
- * Searched LKey on success, UINT32_MAX on no match.
- */
-static uint32_t
-mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
-{
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
- struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct mlx5_priv *priv = txq_ctrl->priv;
-
- return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,
- &priv->sh->cdev->mr_scache, mr_ctrl, addr,
- priv->sh->cdev->config.mr_ext_memseg_en);
-}
-
-/**
- * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
- * list, register the mempool of the mbuf as externally allocated memory.
- *
- * @param txq
- * Pointer to Tx queue structure.
- * @param mb
- * Pointer to mbuf.
- *
- * @return
- * Searched LKey on success, UINT32_MAX on no match.
- */
-uint32_t
-mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
-{
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
- struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct mlx5_priv *priv = txq_ctrl->priv;
- uintptr_t addr = (uintptr_t)mb->buf_addr;
- uint32_t lkey;
-
- if (priv->sh->cdev->config.mr_mempool_reg_en) {
- struct rte_mempool *mp = NULL;
- struct mlx5_mprq_buf *buf;
-
- if (!RTE_MBUF_HAS_EXTBUF(mb)) {
- mp = mlx5_mb2mp(mb);
- } else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
- /* Recover MPRQ mempool. */
- buf = mb->shinfo->fcb_opaque;
- mp = buf->mp;
- }
- if (mp != NULL) {
- lkey = mlx5_mr_mempool2mr_bh(&priv->sh->cdev->mr_scache,
- mr_ctrl, mp, addr);
- /*
- * Lookup can only fail on invalid input, e.g. "addr"
- * is not from "mp" or "mp" has RTE_MEMPOOL_F_NON_IO set.
- */
- if (lkey != UINT32_MAX)
- return lkey;
- }
- /* Fallback for generic mechanism in corner cases. */
- }
- return mlx5_tx_addr2mr_bh(txq, addr);
-}
#include <mlx5_prm.h>
#include <mlx5_common.h>
+#include <mlx5_common_mr.h>
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
}
-void
-mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
-{
- struct mlx5_mprq_buf *buf = opaque;
-
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
- rte_mempool_put(buf->mp, buf);
- } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
- __ATOMIC_RELAXED) == 0)) {
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
- rte_mempool_put(buf->mp, buf);
- }
-}
-
void
mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
{
uint32_t cqe_cnt; /* Number of CQEs. */
};
-/* Multi-Packet RQ buffer header. */
-struct mlx5_mprq_buf {
- struct rte_mempool *mp;
- uint16_t refcnt; /* Atomically accessed refcnt. */
- uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
- struct rte_mbuf_ext_shared_info shinfos[];
- /*
- * Shared information per stride.
- * More memory will be allocated for the first stride head-room and for
- * the strides data.
- */
-} __rte_cache_aligned;
-
/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
sizeof(struct mlx5_mprq_buf) + \
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
-void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n);
#include <mlx5_glue.h>
#include <mlx5_malloc.h>
+#include <mlx5_common_mr.h>
#include "mlx5_defs.h"
#include "mlx5.h"
int mlx5_queue_state_modify(struct rte_eth_dev *dev,
struct mlx5_mp_arg_queue_state_modify *sm);
-/* mlx5_mr.c */
-
-void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
-int mlx5_net_dma_map(struct rte_device *rte_dev, void *addr, uint64_t iova,
- size_t len);
-int mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr, uint64_t iova,
- size_t len);
-
-/**
- * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
- * cloned mbuf is allocated is returned instead.
- *
- * @param buf
- * Pointer to mbuf.
- *
- * @return
- * Memory pool where data is located for given mbuf.
- */
-static inline struct rte_mempool *
-mlx5_mb2mp(struct rte_mbuf *buf)
-{
- if (unlikely(RTE_MBUF_CLONED(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
-}
-
#endif /* RTE_PMD_MLX5_RXTX_H_ */
int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
struct rte_eth_burst_mode *mode);
-/* mlx5_mr.c */
-
-uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
-
/* mlx5_tx_empw.c */
MLX5_TXOFF_PRE_DECL(full_empw);
#endif
/**
- * Query LKey from a packet buffer for Tx. If not found, add the mempool.
+ * Query LKey from a packet buffer for Tx.
*
* @param txq
* Pointer to Tx queue structure.
- * @param addr
- * Address to search.
+ * @param mb
+ * Pointer to mbuf.
*
* @return
* Searched LKey on success, UINT32_MAX on no match.
mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- uintptr_t addr = (uintptr_t)mb->buf_addr;
- uint32_t lkey;
-
- /* Check generation bit to see if there's any change on existing MRs. */
- if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
- mlx5_mr_flush_local_cache(mr_ctrl);
- /* Linear search on MR cache array. */
- lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
- MLX5_MR_CACHE_N, addr);
- if (likely(lkey != UINT32_MAX))
- return lkey;
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_priv *priv = txq_ctrl->priv;
+
+	/* Resolve LKey via the common MR cache; bottom-half is taken on miss. */
- return mlx5_tx_mb2mr_bh(txq, mb);
+ return mlx5_mr_mb2mr(priv->sh->cdev, &priv->mp_id, mr_ctrl, mb);
}
/**
};
int
-mlx5_regex_start(struct rte_regexdev *dev __rte_unused)
+mlx5_regex_start(struct rte_regexdev *dev)
{
- return 0;
+ struct mlx5_regex_priv *priv = dev->data->dev_private;
+
+ return mlx5_dev_mempool_subscribe(priv->cdev);
}
int