+ mr = mlx5_mr_get(priv->dev, mp);
+ if (mr) {
+ mlx5_mr_release(mr);
+ return;
+ }
+ mr = mlx5_mr_new(priv->dev, mp);
+ if (!mr)
+ DRV_LOG(ERR, "port %u cannot create memory region: %s",
+ priv->dev->data->port_id, strerror(rte_errno));
+}
+
+/**
+ * Register a new memory region from the mempool and store it in the memory
+ * region list.
+ *
+ * On success the region holds one reference (refcnt == 1) and is inserted
+ * at the head of the device private MR list.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param mp
+ *   Pointer to the memory pool to register.
+ *
+ * @return
+ *   The memory region on success, NULL on failure and rte_errno is set.
+ */
+struct mlx5_mr *
+mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
+{
+	struct priv *priv = dev->data->dev_private;
+	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+	uintptr_t start;
+	uintptr_t end;
+	unsigned int i;
+	struct mlx5_mr *mr;
+
+	mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
+	if (!mr) {
+		/* Descriptor allocation failed; ibv_reg_mr() was not called. */
+		DRV_LOG(DEBUG,
+			"port %u unable to allocate memory region descriptor",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	if (mlx5_check_mempool(mp, &start, &end) != 0) {
+		DRV_LOG(ERR, "port %u mempool %p: not virtually contiguous",
+			dev->data->port_id, (void *)mp);
+		rte_free(mr); /* Avoid leaking the descriptor on failure. */
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	DRV_LOG(DEBUG, "port %u mempool %p area start=%p end=%p size=%zu",
+		dev->data->port_id, (void *)mp, (void *)start, (void *)end,
+		(size_t)(end - start));
+	/* Save original addresses for exact MR lookup. */
+	mr->start = start;
+	mr->end = end;
+	/* Round start and end to page boundary if found in memory segments. */
+	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
+		uintptr_t addr = (uintptr_t)ms[i].addr;
+		size_t len = ms[i].len;
+		unsigned int align = ms[i].hugepage_sz;
+
+		if ((start > addr) && (start < addr + len))
+			start = RTE_ALIGN_FLOOR(start, align);
+		if ((end > addr) && (end < addr + len))
+			end = RTE_ALIGN_CEIL(end, align);
+	}
+	DRV_LOG(DEBUG,
+		"port %u mempool %p using start=%p end=%p size=%zu for memory"
+		" region",
+		dev->data->port_id, (void *)mp, (void *)start, (void *)end,
+		(size_t)(end - start));
+	mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
+				   IBV_ACCESS_LOCAL_WRITE);
+	if (!mr->mr) {
+		/* Report the failure; other error paths do, keep consistent. */
+		DRV_LOG(ERR,
+			"port %u mempool %p: memory region registration failed",
+			dev->data->port_id, (void *)mp);
+		rte_free(mr); /* Avoid leaking the descriptor on failure. */
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	mr->mp = mp;
+	mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
+	rte_atomic32_inc(&mr->refcnt);
+	DRV_LOG(DEBUG, "port %u new memory Region %p refcnt: %d",
+		dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
+	LIST_INSERT_HEAD(&priv->mr, mr, next);
+	return mr;
+}
+
+/**
+ * Search the memory region object in the memory region list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mp
+ *   Pointer to the memory pool whose memory region is looked up.
+ *
+ * @return
+ *   The memory region if found (its reference count is incremented),
+ *   NULL otherwise.
+ */
+struct mlx5_mr *
+mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr;
+
+ assert(mp);
+ if (LIST_EMPTY(&priv->mr))
+ return NULL;
+ LIST_FOREACH(mr, &priv->mr, next) {
+ if (mr->mp == mp) {
+ rte_atomic32_inc(&mr->refcnt);
+ DRV_LOG(DEBUG, "port %u memory region %p refcnt: %d",
+ dev->data->port_id, (void *)mr,
+ rte_atomic32_read(&mr->refcnt));
+ return mr;