/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Red Hat, Inc.
 */

#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_tailq.h>

#include "iotlb.h"
#include "vhost.h"
struct vhost_iotlb_entry {
	TAILQ_ENTRY(vhost_iotlb_entry) next;

	uint64_t iova;
	uint64_t uaddr;
	uint64_t size;
	uint8_t perm;
};

#define IOTLB_CACHE_SIZE 2048
static void
vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq);
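
/*
 * Drop all entries from the pending-miss list and return them to the
 * mempool. Used when the pool runs dry and on full flush.
 */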
static void
vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
{
	struct vhost_iotlb_entry *node, *temp_node;

	rte_rwlock_write_lock(&vq->iotlb_pending_lock);

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
		rte_mempool_put(vq->iotlb_pool, node);
	}

	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}
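
/*
 * Return true if a miss for this (iova, perm) pair is already pending,
 * i.e. an IOTLB miss request has been sent and no update received yet.
 */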
bool
vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
				uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	bool found = false;

	rte_rwlock_read_lock(&vq->iotlb_pending_lock);

	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
		if ((node->iova == iova) && (node->perm == perm)) {
			found = true;
			break;
		}
	}

	rte_rwlock_read_unlock(&vq->iotlb_pending_lock);

	return found;
}
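
/*
 * Record that a miss for this (iova, perm) pair is in flight. If the
 * mempool is exhausted, drain the pending list first (or evict a random
 * cache entry when the pending list is already empty) and retry once.
 */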
void
vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
				uint64_t iova, uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	int ret;

	ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
	if (ret) {
		VHOST_LOG_CONFIG(DEBUG, "IOTLB pool empty, clear entries\n");
		if (!TAILQ_EMPTY(&vq->iotlb_pending_list))
			vhost_user_iotlb_pending_remove_all(vq);
		else
			vhost_user_iotlb_cache_random_evict(vq);
		ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
		if (ret) {
			VHOST_LOG_CONFIG(ERR, "IOTLB pool still empty, failure\n");
			return;
		}
	}

	node->iova = iova;
	node->perm = perm;

	rte_rwlock_write_lock(&vq->iotlb_pending_lock);

	TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);

	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}
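
/*
 * Remove pending-miss entries falling in [iova, iova + size) whose
 * permissions are satisfied by 'perm', typically once the matching
 * IOTLB update has been received and inserted into the cache.
 */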
void
vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
				uint64_t iova, uint64_t size, uint8_t perm)
{
	struct vhost_iotlb_entry *node, *temp_node;

	rte_rwlock_write_lock(&vq->iotlb_pending_lock);

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
		if (node->iova < iova)
			continue;
		if (node->iova >= iova + size)
			continue;
		if ((node->perm & perm) != node->perm)
			continue;
		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
		rte_mempool_put(vq->iotlb_pool, node);
	}

	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}
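
/*
 * Empty the IOTLB cache, returning every entry to the mempool and
 * resetting the cached-entry counter.
 */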
static void
vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
{
	struct vhost_iotlb_entry *node, *temp_node;

	rte_rwlock_write_lock(&vq->iotlb_lock);

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
		TAILQ_REMOVE(&vq->iotlb_list, node, next);
		rte_mempool_put(vq->iotlb_pool, node);
	}

	vq->iotlb_cache_nr = 0;

	rte_rwlock_write_unlock(&vq->iotlb_lock);
}
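
/*
 * Evict one randomly-chosen cache entry to make room in the mempool.
 * The cache must be non-empty: iotlb_cache_nr of zero would make the
 * modulo below divide by zero, so callers only invoke this when
 * entries exist to evict.
 */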
static void
vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
{
	struct vhost_iotlb_entry *node, *temp_node;
	int entry_idx;

	rte_rwlock_write_lock(&vq->iotlb_lock);

	entry_idx = rte_rand() % vq->iotlb_cache_nr;

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
		if (!entry_idx) {
			TAILQ_REMOVE(&vq->iotlb_list, node, next);
			rte_mempool_put(vq->iotlb_pool, node);
			vq->iotlb_cache_nr--;
			break;
		}
		entry_idx--;
	}

	rte_rwlock_write_unlock(&vq->iotlb_lock);
}
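
/*
 * Insert a translation into the cache, keeping the list sorted by iova.
 * On mempool exhaustion, evict a random cache entry (or drain the
 * pending list if the cache is empty) and retry once. Any pending miss
 * covered by the new entry is removed afterwards.
 */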
void
vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
				uint64_t uaddr, uint64_t size, uint8_t perm)
{
	struct vhost_iotlb_entry *node, *new_node;
	int ret;

	ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
	if (ret) {
		VHOST_LOG_CONFIG(DEBUG, "IOTLB pool empty, clear entries\n");
		if (!TAILQ_EMPTY(&vq->iotlb_list))
			vhost_user_iotlb_cache_random_evict(vq);
		else
			vhost_user_iotlb_pending_remove_all(vq);
		ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
		if (ret) {
			VHOST_LOG_CONFIG(ERR, "IOTLB pool still empty, failure\n");
			return;
		}
	}

	new_node->iova = iova;
	new_node->uaddr = uaddr;
	new_node->size = size;
	new_node->perm = perm;

	rte_rwlock_write_lock(&vq->iotlb_lock);

	TAILQ_FOREACH(node, &vq->iotlb_list, next) {
		/*
		 * Entries must be invalidated before being updated.
		 * So if iova already in list, assume identical.
		 */
		if (node->iova == new_node->iova) {
			rte_mempool_put(vq->iotlb_pool, new_node);
			goto unlock;
		} else if (node->iova > new_node->iova) {
			TAILQ_INSERT_BEFORE(node, new_node, next);
			vq->iotlb_cache_nr++;
			goto unlock;
		}
	}

	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
	vq->iotlb_cache_nr++;

unlock:
	vhost_user_iotlb_pending_remove(vq, iova, size, perm);

	rte_rwlock_write_unlock(&vq->iotlb_lock);
}
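
/*
 * Invalidate all cache entries overlapping [iova, iova + size). Since
 * the list is sorted by iova, the walk stops at the first entry
 * starting beyond the invalidated range.
 */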
void
vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
				uint64_t iova, uint64_t size)
{
	struct vhost_iotlb_entry *node, *temp_node;

	if (unlikely(!size))
		return;

	rte_rwlock_write_lock(&vq->iotlb_lock);

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
		/* Sorted list */
		if (unlikely(iova + size < node->iova))
			break;

		if (iova < node->iova + node->size) {
			TAILQ_REMOVE(&vq->iotlb_list, node, next);
			rte_mempool_put(vq->iotlb_pool, node);
			vq->iotlb_cache_nr--;
		}
	}

	rte_rwlock_write_unlock(&vq->iotlb_lock);
}
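
/*
 * Translate a guest IOVA to a host virtual address, possibly spanning
 * several contiguous cache entries. On return, *size is shrunk to the
 * length actually mapped; 0 is returned on a miss or permission error.
 * Note the list is walked without taking iotlb_lock here, so the
 * caller is expected to hold it for reading.
 */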
uint64_t
vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
				uint64_t *size, uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	uint64_t offset, vva = 0, mapped = 0;

	if (unlikely(!*size))
		goto out;

	TAILQ_FOREACH(node, &vq->iotlb_list, next) {
		/* List sorted by iova */
		if (unlikely(iova < node->iova))
			break;

		if (iova >= node->iova + node->size)
			continue;

		if (unlikely((perm & node->perm) != perm)) {
			vva = 0;
			break;
		}

		offset = iova - node->iova;
		if (!mapped)
			vva = node->uaddr + offset;

		mapped += node->size - offset;
		iova = node->iova + node->size;

		if (mapped >= *size)
			break;
	}

out:
	/* Only part of the requested chunk is mapped */
	if (unlikely(mapped < *size))
		*size = mapped;

	return vva;
}
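
/*
 * Drop both the cached translations and the pending-miss entries,
 * e.g. on device reset or cache re-initialization.
 */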
void
vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq)
{
	vhost_user_iotlb_cache_remove_all(vq);
	vhost_user_iotlb_pending_remove_all(vq);
}
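
/*
 * Initialize (or re-initialize) the IOTLB cache for a virtqueue: set up
 * the locks and lists, then create the per-virtqueue entry mempool,
 * placed on the NUMA node the virtqueue is allocated on when NUMA
 * awareness is compiled in.
 */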
int
vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
	int socket = 0;

	if (vq->iotlb_pool) {
		/*
		 * The cache has already been initialized,
		 * just drop all cached and pending entries.
		 */
		vhost_user_iotlb_flush_all(vq);
	}

#ifdef RTE_LIBRTE_VHOST_NUMA
	if (get_mempolicy(&socket, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR) != 0)
		socket = 0;
#endif

	rte_rwlock_init(&vq->iotlb_lock);
	rte_rwlock_init(&vq->iotlb_pending_lock);

	TAILQ_INIT(&vq->iotlb_list);
	TAILQ_INIT(&vq->iotlb_pending_list);

	snprintf(pool_name, sizeof(pool_name), "iotlb_%u_%d_%d",
			getpid(), dev->vid, vq_index);
	VHOST_LOG_CONFIG(DEBUG, "IOTLB cache name: %s\n", pool_name);

	/* If already created, free it and recreate */
	vq->iotlb_pool = rte_mempool_lookup(pool_name);
	if (vq->iotlb_pool)
		rte_mempool_free(vq->iotlb_pool);

	vq->iotlb_pool = rte_mempool_create(pool_name,
			IOTLB_CACHE_SIZE, sizeof(struct vhost_iotlb_entry), 0,
			0, 0, NULL, NULL, NULL, socket,
			MEMPOOL_F_NO_CACHE_ALIGN |
			MEMPOOL_F_SP_PUT);
	if (!vq->iotlb_pool) {
		VHOST_LOG_CONFIG(ERR,
				"Failed to create IOTLB cache pool (%s)\n",
				pool_name);
		return -1;
	}

	vq->iotlb_cache_nr = 0;

	return 0;
}