entry = next;
}
SLIST_INIT(&priv->mr_list);
- if (priv->lm_mr.addr)
- mlx5_os_wrapped_mkey_destroy(&priv->lm_mr);
- if (priv->null_mr) {
- claim_zero(mlx5_glue->dereg_mr(priv->null_mr));
- priv->null_mr = NULL;
- }
if (priv->vmem) {
free(priv->vmem);
priv->vmem = NULL;
return mem;
}
+/*
+ * Compare two vhost memory layouts.
+ *
+ * Layouts are considered equal when they contain the same number of
+ * regions and each region pair matches in guest physical address and
+ * size. Other region fields (e.g. host_user_addr) are intentionally
+ * ignored here — NOTE(review): presumably only GPA/size affect the
+ * registered MRs being reused; confirm against the registration path.
+ *
+ * @param mem1
+ *   First memory layout.
+ * @param mem2
+ *   Second memory layout.
+ * @return
+ *   0 when the layouts match, -1 on any mismatch.
+ */
+static int
+mlx5_vdpa_mem_cmp(struct rte_vhost_memory *mem1, struct rte_vhost_memory *mem2)
+{
+	uint32_t i;
+
+	/* A different region count means the guest memory map changed. */
+	if (mem1->nregions != mem2->nregions)
+		return -1;
+	for (i = 0; i < mem1->nregions; i++) {
+		if (mem1->regions[i].guest_phys_addr !=
+		    mem2->regions[i].guest_phys_addr)
+			return -1;
+		if (mem1->regions[i].size != mem2->regions[i].size)
+			return -1;
+	}
+	return 0;
+}
+
#define KLM_SIZE_MAX_ALIGN(sz) ((sz) > MLX5_MAX_KLM_BYTE_COUNT ? \
MLX5_MAX_KLM_BYTE_COUNT : (sz))
if (!mem)
return -rte_errno;
- priv->vmem = mem;
- priv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);
- if (!priv->null_mr) {
- DRV_LOG(ERR, "Failed to allocate null MR.");
- ret = -errno;
- goto error;
+ if (priv->vmem != NULL) {
+ if (mlx5_vdpa_mem_cmp(mem, priv->vmem) == 0) {
+ /* VM memory not changed, reuse resources. */
+ free(mem);
+ return 0;
+ }
+ mlx5_vdpa_mem_dereg(priv);
}
- DRV_LOG(DEBUG, "Dump fill Mkey = %u.", priv->null_mr->lkey);
+ priv->vmem = mem;
for (i = 0; i < mem->nregions; i++) {
reg = &mem->regions[i];
entry = rte_zmalloc(__func__, sizeof(*entry), 0);
priv->gpa_mkey_index = entry->mkey->id;
return 0;
error:
- if (entry)
- rte_free(entry);
+ rte_free(entry);
mlx5_vdpa_mem_dereg(priv);
rte_errno = -ret;
return ret;