/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <stdlib.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_common.h>
#include <rte_sched_common.h>

#include <mlx5_prm.h>
#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"

void
mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_query_mr *entry;
	struct mlx5_vdpa_query_mr *next;

	entry = SLIST_FIRST(&priv->mr_list);
	while (entry) {
		next = SLIST_NEXT(entry, next);
		if (entry->is_indirect)
			claim_zero(mlx5_devx_cmd_destroy(entry->mkey));
		else
			claim_zero(mlx5_glue->dereg_mr(entry->mr));
		SLIST_REMOVE(&priv->mr_list, entry, mlx5_vdpa_query_mr, next);
		rte_free(entry);
		entry = next;
	}
	SLIST_INIT(&priv->mr_list);
	if (priv->lm_mr.addr)
		mlx5_os_wrapped_mkey_destroy(&priv->lm_mr);
	if (priv->vmem) {
		free(priv->vmem);
		priv->vmem = NULL;
	}
}

static int
mlx5_vdpa_regions_addr_cmp(const void *a, const void *b)
{
	const struct rte_vhost_mem_region *region_a = a;
	const struct rte_vhost_mem_region *region_b = b;

	if (region_a->guest_phys_addr < region_b->guest_phys_addr)
		return -1;
	if (region_a->guest_phys_addr > region_b->guest_phys_addr)
		return 1;
	return 0;
}

#define KLM_NUM_MAX_ALIGN(sz) (RTE_ALIGN_CEIL(sz, MLX5_MAX_KLM_BYTE_COUNT) / \
			       MLX5_MAX_KLM_BYTE_COUNT)
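
/*
 * Illustrative example (hypothetical sizes, not from a real trace): with
 * MLX5_MAX_KLM_BYTE_COUNT equal to 2G, a 5G range needs
 * KLM_NUM_MAX_ALIGN(5G) = ceil(5G / 2G) = 3 KLM entries.
 */
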
/*
 * Allocate and sort the region list and choose the indirect mkey mode:
 *   1. Calculate the GCD, the guest memory size and the indirect mkey
 *      entries number for each mode.
 *   2. Align the GCD to the maximum allowed size (2G) and round it down
 *      to a power of 2.
 *   3. Decide the indirect mkey mode according to the following rules:
 *     a. If both the KLM_FBS entries number and the KLM entries number
 *        are bigger than the maximum allowed (MLX5_DEVX_MAX_KLM_ENTRIES)
 *        - error.
 *     b. KLM mode if the KLM_FBS entries number is bigger than the
 *        maximum allowed (MLX5_DEVX_MAX_KLM_ENTRIES).
 *     c. KLM mode if the GCD is smaller than the minimum allowed (4K).
 *     d. KLM mode if the total size of the KLM entries fits in one cache
 *        line while the total size of the KLM_FBS entries does not.
 *     e. Otherwise, KLM_FBS mode.
 * A worked example follows this function.
 */
static struct rte_vhost_memory *
mlx5_vdpa_vhost_mem_regions_prepare(int vid, uint8_t *mode, uint64_t *mem_size,
				    uint64_t *gcd, uint32_t *entries_num)
{
	struct rte_vhost_memory *mem;
	uint64_t size;
	uint64_t klm_entries_num = 0;
	uint64_t klm_fbs_entries_num;
	uint32_t i;
	int ret = rte_vhost_get_mem_table(vid, &mem);

	if (ret < 0) {
		DRV_LOG(ERR, "Failed to get VM memory layout, vid = %d.", vid);
		rte_errno = EINVAL;
		return NULL;
	}
	/* Sort regions by GPA so holes and the GCD can be computed. */
	qsort(mem->regions, mem->nregions, sizeof(mem->regions[0]),
	      mlx5_vdpa_regions_addr_cmp);
	/* Guest address span from the first GPA to the end of the last region. */
	*mem_size = (mem->regions[(mem->nregions - 1)].guest_phys_addr) +
		    (mem->regions[(mem->nregions - 1)].size) -
		    (mem->regions[0].guest_phys_addr);
	*gcd = 0;
	for (i = 0; i < mem->nregions; ++i) {
		DRV_LOG(INFO, "Region %u: HVA 0x%" PRIx64 ", GPA 0x%" PRIx64
			", size 0x%" PRIx64 ".", i,
			mem->regions[i].host_user_addr,
			mem->regions[i].guest_phys_addr, mem->regions[i].size);
		if (i > 0) {
			/* Hole between the previous region and this one. */
			size = mem->regions[i].guest_phys_addr -
				(mem->regions[i - 1].guest_phys_addr +
				 mem->regions[i - 1].size);
			*gcd = rte_get_gcd64(*gcd, size);
			klm_entries_num += KLM_NUM_MAX_ALIGN(size);
		}
		size = mem->regions[i].size;
		*gcd = rte_get_gcd64(*gcd, size);
		klm_entries_num += KLM_NUM_MAX_ALIGN(size);
	}
	if (*gcd > MLX5_MAX_KLM_BYTE_COUNT)
		*gcd = rte_get_gcd64(*gcd, MLX5_MAX_KLM_BYTE_COUNT);
	if (!RTE_IS_POWER_OF_2(*gcd)) {
		uint64_t candidate_gcd = rte_align64prevpow2(*gcd);

		/* Fall back to the largest power of 2 that divides the GCD. */
		while (candidate_gcd > 1 && (*gcd % candidate_gcd))
			candidate_gcd /= 2;
		DRV_LOG(DEBUG, "GCD 0x%" PRIx64 " is not a power of 2. "
			"Adjusted GCD is 0x%" PRIx64 ".", *gcd, candidate_gcd);
		*gcd = candidate_gcd;
	}
	klm_fbs_entries_num = *mem_size / *gcd;
	if (*gcd < MLX5_MIN_KLM_FIXED_BUFFER_SIZE ||
	    klm_fbs_entries_num > MLX5_DEVX_MAX_KLM_ENTRIES ||
	    ((klm_entries_num * sizeof(struct mlx5_klm)) <=
	     RTE_CACHE_LINE_SIZE &&
	     (klm_fbs_entries_num * sizeof(struct mlx5_klm)) >
	     RTE_CACHE_LINE_SIZE)) {
		*mode = MLX5_MKC_ACCESS_MODE_KLM;
		*entries_num = klm_entries_num;
		DRV_LOG(INFO, "Indirect mkey mode is KLM.");
	} else {
		*mode = MLX5_MKC_ACCESS_MODE_KLM_FBS;
		*entries_num = klm_fbs_entries_num;
		DRV_LOG(INFO, "Indirect mkey mode is KLM Fixed Buffer Size.");
	}
	DRV_LOG(DEBUG, "Memory registration information: nregions = %u, "
		"mem_size = 0x%" PRIx64 ", GCD = 0x%" PRIx64
		", klm_fbs_entries_num = 0x%" PRIx64 ", klm_entries_num = 0x%"
		PRIx64 ".", mem->nregions, *mem_size, *gcd, klm_fbs_entries_num,
		klm_entries_num);
	if (*entries_num > MLX5_DEVX_MAX_KLM_ENTRIES) {
		DRV_LOG(ERR, "Failed to prepare memory of vid %d - memory is "
			"too fragmented.", vid);
		free(mem);
		return NULL;
	}
	return mem;
}
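
/*
 * Worked example for the mode selection above (hypothetical layout, not
 * taken from a real trace): two 1G regions at GPA 0x0 and GPA 0x80000000
 * with a 1G hole between them.
 *   - mem_size = 3G; GCD of {1G, 1G, 1G} = 1G: a power of 2, below the
 *     2G cap and above the 4K minimum.
 *   - klm_entries_num = 3 (each region/hole is under 2G, one entry each).
 *   - klm_fbs_entries_num = mem_size / GCD = 3.
 * Neither count exceeds MLX5_DEVX_MAX_KLM_ENTRIES and both entry tables
 * (3 * sizeof(struct mlx5_klm) = 48 bytes) fit in one cache line, so
 * rules a-d do not trigger and rule e selects KLM_FBS mode.
 */
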
#define KLM_SIZE_MAX_ALIGN(sz) ((sz) > MLX5_MAX_KLM_BYTE_COUNT ? \
				MLX5_MAX_KLM_BYTE_COUNT : (sz))
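
/*
 * Illustrative example (hypothetical size): KLM_SIZE_MAX_ALIGN clamps a
 * single KLM entry to the 2G maximum, so in KLM mode the registration
 * loop below splits a 5G region into direct chunks of 2G, 2G and 1G.
 */
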
/*
 * The target here is to group all the physical memory regions of the
 * virtio device into one indirect mkey.
 * For KLM Fixed Buffer Size mode (HW finds the translation entry in one
 * read according to the guest physical address):
 * All its sub-direct mkeys must have the same size, hence each of them
 * covers the GCD size of all the virtio memory regions and of the holes
 * between them.
 * For KLM mode (entries may differ in size, so HW must iterate over the
 * entries):
 * Each virtio memory region and each hole between regions gets one
 * entry; entries whose associated memory range is bigger than the
 * maximum allowed size (2G) are split accordingly.
 * This means that in both modes a virtio memory region may be mapped to
 * more than one direct mkey.
 * All the holes of invalid memory between the virtio memory regions are
 * mapped to the null memory region for security.
 * A hypothetical usage sketch follows this function.
 */
int
mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_devx_mkey_attr mkey_attr;
	struct mlx5_vdpa_query_mr *entry = NULL;
	struct rte_vhost_mem_region *reg = NULL;
	uint8_t mode = 0;
	uint32_t entries_num = 0;
	uint32_t i;
	uint64_t gcd = 0;
	uint64_t klm_size;
	uint64_t mem_size;
	uint64_t k;
	int klm_index = 0;
	int ret;
	struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
			(priv->vid, &mode, &mem_size, &gcd, &entries_num);
	struct mlx5_klm klm_array[entries_num];

	if (!mem)
		return -rte_errno;
	priv->vmem = mem;
	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		entry = rte_zmalloc(__func__, sizeof(*entry), 0);
		if (!entry) {
			ret = -ENOMEM;
			DRV_LOG(ERR, "Failed to allocate memory for mem entry.");
			goto error;
		}
		/* Direct mkey: maps the region HVA at its guest IOVA. */
		entry->mr = mlx5_glue->reg_mr_iova(priv->cdev->pd,
				(void *)(uintptr_t)(reg->host_user_addr),
				reg->size, reg->guest_phys_addr,
				IBV_ACCESS_LOCAL_WRITE);
		if (!entry->mr) {
			DRV_LOG(ERR, "Failed to create direct Mkey.");
			ret = -rte_errno;
			goto error;
		}
		entry->is_indirect = 0;
		if (i > 0) {
			uint64_t sadd;
			uint64_t empty_region_sz = reg->guest_phys_addr -
					(mem->regions[i - 1].guest_phys_addr +
					 mem->regions[i - 1].size);

			if (empty_region_sz > 0) {
				sadd = mem->regions[i - 1].guest_phys_addr +
				       mem->regions[i - 1].size;
				klm_size = mode == MLX5_MKC_ACCESS_MODE_KLM ?
				      KLM_SIZE_MAX_ALIGN(empty_region_sz) : gcd;
				/* Map the hole to the null mkey for security. */
				for (k = 0; k < empty_region_sz;
				     k += klm_size) {
					klm_array[klm_index].byte_count =
						k + klm_size > empty_region_sz ?
						empty_region_sz - k : klm_size;
					klm_array[klm_index].mkey =
						priv->null_mr->lkey;
					klm_array[klm_index].address = sadd + k;
					klm_index++;
				}
			}
		}
		klm_size = mode == MLX5_MKC_ACCESS_MODE_KLM ?
			   KLM_SIZE_MAX_ALIGN(reg->size) : gcd;
		for (k = 0; k < reg->size; k += klm_size) {
			klm_array[klm_index].byte_count = k + klm_size >
				reg->size ? reg->size - k : klm_size;
			klm_array[klm_index].mkey = entry->mr->lkey;
			klm_array[klm_index].address = reg->guest_phys_addr + k;
			klm_index++;
		}
		SLIST_INSERT_HEAD(&priv->mr_list, entry, next);
	}
	memset(&mkey_attr, 0, sizeof(mkey_attr));
	mkey_attr.addr = (uintptr_t)(mem->regions[0].guest_phys_addr);
	mkey_attr.size = mem_size;
	mkey_attr.pd = priv->cdev->pdn;
	mkey_attr.umem_id = 0;
	/* Must be zero for KLM mode. */
	mkey_attr.log_entity_size = mode == MLX5_MKC_ACCESS_MODE_KLM_FBS ?
				    rte_log2_u64(gcd) : 0;
	mkey_attr.pg_access = 0;
	mkey_attr.klm_array = klm_array;
	mkey_attr.klm_num = klm_index;
	entry = rte_zmalloc(__func__, sizeof(*entry), 0);
	if (!entry) {
		DRV_LOG(ERR, "Failed to allocate memory for indirect entry.");
		ret = -ENOMEM;
		goto error;
	}
	entry->mkey = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, &mkey_attr);
	if (!entry->mkey) {
		DRV_LOG(ERR, "Failed to create indirect Mkey.");
		ret = -rte_errno;
		goto error;
	}
	entry->is_indirect = 1;
	SLIST_INSERT_HEAD(&priv->mr_list, entry, next);
	priv->gpa_mkey_index = entry->mkey->id;
	return 0;
error:
	rte_free(entry);
	mlx5_vdpa_mem_dereg(priv);
	rte_errno = -ret;
	return ret;
}
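
#if 0
/*
 * Hypothetical usage sketch, kept out of the build: how a caller might
 * pair the two entry points of this file around a vhost memory table
 * update. The helper name is illustrative only; real callers also handle
 * errors and serialization.
 */
static int
example_mem_update(struct mlx5_vdpa_priv *priv)
{
	int ret;

	/* Drop the direct mkeys and the old indirect mkey, if any. */
	mlx5_vdpa_mem_dereg(priv);
	/* Rebuild the KLM table and the indirect mkey for the new table. */
	ret = mlx5_vdpa_mem_register(priv);
	if (ret)
		return ret;
	/* priv->gpa_mkey_index now refers to the new indirect mkey. */
	return 0;
}
#endif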