#include <rte_bus_pci.h>
#include <mlx5_common.h>
+#include <mlx5_common_mr.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
int mlx5_regex_logtype;
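+/* List of probed devices scanned by the memory event callback. */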
+TAILQ_HEAD(regex_mem_event, mlx5_regex_priv) mlx5_mem_event_list =
+ TAILQ_HEAD_INITIALIZER(mlx5_mem_event_list);
+static pthread_mutex_t mem_event_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
const struct rte_regexdev_ops mlx5_regexdev_ops = {
.dev_info_get = mlx5_regex_info_get,
.dev_configure = mlx5_regex_configure,
sprintf(name, "mlx5_regex_%s", dev->name);
}
+/**
+ * Callback for memory event.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
+ */
+static void
+mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct mlx5_regex_priv *priv;
+
+ /* Must be called from the primary process. */
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ pthread_mutex_lock(&mem_event_list_lock);
+ /* Iterate all the existing mlx5 devices. */
+ TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
+ mlx5_free_mr_by_addr(&priv->mr_scache,
+ priv->ctx->device->name,
+ addr, len);
+ pthread_mutex_unlock(&mem_event_list_lock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
+ }
+}
+
static int
mlx5_regex_dev_probe(struct rte_device *rte_dev)
{
rte_errno = ENOMEM;
goto error;
}
+ /* Register callback function for global shared MR cache management. */
+ if (TAILQ_EMPTY(&mlx5_mem_event_list))
+ rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+ mlx5_regex_mr_mem_event_cb,
+ NULL);
+ /* Add device to memory callback list. */
+ pthread_mutex_lock(&mem_event_list_lock);
+ TAILQ_INSERT_TAIL(&mlx5_mem_event_list, priv, mem_event_cb);
+ pthread_mutex_unlock(&mem_event_list_lock);
DRV_LOG(INFO, "RegEx GGA is %s.",
priv->has_umr ? "supported" : "unsupported");
return 0;
return 0;
priv = dev->data->dev_private;
if (priv) {
+ /* Remove from memory callback device list. */
+ pthread_mutex_lock(&mem_event_list_lock);
+ TAILQ_REMOVE(&mlx5_mem_event_list, priv, mem_event_cb);
+ pthread_mutex_unlock(&mem_event_list_lock);
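+ /* Unregister the callback when the last device is removed. */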
+ if (TAILQ_EMPTY(&mlx5_mem_event_list))
+ rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
+ NULL);
if (priv->pd)
mlx5_glue->dealloc_pd(priv->pd);
if (priv->uar)
seg->imm = imm;
}
+/**
+ * Query LKey from a packet buffer for QP. If not found, register the
+ * memory through the slow (bottom-half) path.
+ *
+ * @param priv
+ * Pointer to the priv object.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mbuf
+ * Pointer to source mbuf, to search in.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static inline uint32_t
+mlx5_regex_addr2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mbuf *mbuf)
+{
+ uintptr_t addr = rte_pktmbuf_mtod(mbuf, uintptr_t);
+ uint32_t lkey;
+
+ /* Check generation bit to see if there's any change on existing MRs. */
+ if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+ mlx5_mr_flush_local_cache(mr_ctrl);
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half on miss. */
+ return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
+ !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
+}
+
static inline void
__prep_one(struct mlx5_regex_priv *priv, struct mlx5_regex_sq *sq,
struct rte_regex_ops *op, struct mlx5_regex_job *job,
struct mlx5_klm klm;
klm.byte_count = rte_pktmbuf_data_len(op->mbuf);
- klm.mkey = mlx5_mr_addr2mr_bh(priv->pd, 0,
- &priv->mr_scache, &qp->mr_ctrl,
- rte_pktmbuf_mtod(op->mbuf, uintptr_t),
- !!(op->mbuf->ol_flags & EXT_ATTACHED_MBUF));
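+ /* Translate the mbuf data address to an LKey via the per-queue MR cache. */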
+ klm.mkey = mlx5_regex_addr2mr(priv, &qp->mr_ctrl, op->mbuf);
klm.address = rte_pktmbuf_mtod(op->mbuf, uintptr_t);
__prep_one(priv, sq, op, job, sq->pi, &klm);
sq->db_pi = sq->pi;
(qp->jobs[mkey_job_id].imkey->id);
while (mbuf) {
/* Build indirect mkey seg's KLM. */
- mkey_klm->mkey = mlx5_mr_addr2mr_bh(priv->pd,
- NULL, &priv->mr_scache, &qp->mr_ctrl,
- rte_pktmbuf_mtod(mbuf, uintptr_t),
- !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
+ mkey_klm->mkey = mlx5_regex_addr2mr
+ (priv, &qp->mr_ctrl, mbuf);
mkey_klm->address = rte_cpu_to_be_64
(rte_pktmbuf_mtod(mbuf, uintptr_t));
mkey_klm->byte_count = rte_cpu_to_be_32
klm.byte_count = scatter_size;
} else {
/* The single mbuf case. Build the KLM directly. */
- klm.mkey = mlx5_mr_addr2mr_bh(priv->pd, NULL,
- &priv->mr_scache, &qp->mr_ctrl,
- rte_pktmbuf_mtod(mbuf, uintptr_t),
- !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
+ klm.mkey = mlx5_regex_addr2mr(priv, &qp->mr_ctrl, mbuf);
klm.address = rte_pktmbuf_mtod(mbuf, uintptr_t);
klm.byte_count = rte_pktmbuf_data_len(mbuf);
}