/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2017 Red Hat, Inc.
 */

#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_tailq.h>

#include "iotlb.h"
#include "vhost.h"

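/*
 * A single IOTLB entry, shared by the per-virtqueue IOTLB cache and the
 * pending-miss list. Cache entries map a guest IOVA range to a host user
 * virtual address (uaddr) with a size and access permissions; pending
 * entries only use the iova and perm fields.
 */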
struct vhost_iotlb_entry {
	TAILQ_ENTRY(vhost_iotlb_entry) next;

	uint64_t iova;
	uint64_t uaddr;
	uint64_t size;
	uint8_t perm;
};

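/* Number of entries in the mempool backing both the cache and pending lists */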
#define IOTLB_CACHE_SIZE 2048

static void
vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq);

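/*
 * Drop all entries from the pending-miss list and return them to the
 * IOTLB mempool.
 */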
static void
vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
{
	struct vhost_iotlb_entry *node, *temp_node;

	rte_rwlock_write_lock(&vq->iotlb_pending_lock);

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
		rte_mempool_put(vq->iotlb_pool, node);
	}

	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}

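/*
 * Check whether an IOTLB miss for this IOVA and permission is already
 * tracked in the pending list.
 */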
bool
vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
				uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	bool found = false;

	rte_rwlock_read_lock(&vq->iotlb_pending_lock);

	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
		if ((node->iova == iova) && (node->perm == perm)) {
			found = true;
			break;
		}
	}

	rte_rwlock_read_unlock(&vq->iotlb_pending_lock);

	return found;
}

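/*
 * Record an IOTLB miss in the pending list. If the mempool is exhausted,
 * reclaim entries by flushing the pending list, or by randomly evicting a
 * cache entry when the pending list is already empty, then retry once.
 */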
void
vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
				uint64_t iova, uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	int ret;

	ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
	if (ret) {
		VHOST_LOG_CONFIG(DEBUG, "IOTLB pool empty, clear entries\n");
		if (!TAILQ_EMPTY(&vq->iotlb_pending_list))
			vhost_user_iotlb_pending_remove_all(vq);
		else
			vhost_user_iotlb_cache_random_evict(vq);
		ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
		if (ret) {
			VHOST_LOG_CONFIG(ERR, "IOTLB pool still empty, failure\n");
			return;
		}
	}

	node->iova = iova;
	node->perm = perm;

	rte_rwlock_write_lock(&vq->iotlb_pending_lock);

	TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);

	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}

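/*
 * Remove pending entries whose IOVA falls inside [iova, iova + size) and
 * whose permissions are covered by perm.
 */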
void
vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
				uint64_t iova, uint64_t size, uint8_t perm)
{
	struct vhost_iotlb_entry *node, *temp_node;

	rte_rwlock_write_lock(&vq->iotlb_pending_lock);

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
		if (node->iova < iova)
			continue;
		if (node->iova >= iova + size)
			continue;
		if ((node->perm & perm) != node->perm)
			continue;
		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
		rte_mempool_put(vq->iotlb_pool, node);
	}

	rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}

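/*
 * Empty the IOTLB cache and return all its entries to the mempool.
 */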
static void
vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
{
	struct vhost_iotlb_entry *node, *temp_node;

	rte_rwlock_write_lock(&vq->iotlb_lock);

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
		TAILQ_REMOVE(&vq->iotlb_list, node, next);
		rte_mempool_put(vq->iotlb_pool, node);
	}

	vq->iotlb_cache_nr = 0;

	rte_rwlock_write_unlock(&vq->iotlb_lock);
}

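/*
 * Evict one randomly chosen entry from the IOTLB cache and return it to
 * the mempool.
 */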
static void
vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
{
	struct vhost_iotlb_entry *node, *temp_node;
	int entry_idx;

	rte_rwlock_write_lock(&vq->iotlb_lock);

	entry_idx = rte_rand() % vq->iotlb_cache_nr;

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
		if (!entry_idx) {
			TAILQ_REMOVE(&vq->iotlb_list, node, next);
			rte_mempool_put(vq->iotlb_pool, node);
			vq->iotlb_cache_nr--;
			break;
		}
		entry_idx--;
	}

	rte_rwlock_write_unlock(&vq->iotlb_lock);
}

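/*
 * Insert a new translation into the IOTLB cache, keeping the list sorted
 * by IOVA. If the IOVA is already cached, the new entry is dropped since
 * entries must be invalidated before being updated. Any matching pending
 * miss is removed once the translation is cached.
 */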
void
vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
				uint64_t uaddr, uint64_t size, uint8_t perm)
{
	struct vhost_iotlb_entry *node, *new_node;
	int ret;

	ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
	if (ret) {
		VHOST_LOG_CONFIG(DEBUG, "IOTLB pool empty, clear entries\n");
		if (!TAILQ_EMPTY(&vq->iotlb_list))
			vhost_user_iotlb_cache_random_evict(vq);
		else
			vhost_user_iotlb_pending_remove_all(vq);
		ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
		if (ret) {
			VHOST_LOG_CONFIG(ERR, "IOTLB pool still empty, failure\n");
			return;
		}
	}

	new_node->iova = iova;
	new_node->uaddr = uaddr;
	new_node->size = size;
	new_node->perm = perm;

	rte_rwlock_write_lock(&vq->iotlb_lock);

	TAILQ_FOREACH(node, &vq->iotlb_list, next) {
		/*
		 * Entries must be invalidated before being updated.
		 * So if iova already in list, assume identical.
		 */
		if (node->iova == new_node->iova) {
			rte_mempool_put(vq->iotlb_pool, new_node);
			goto unlock;
		} else if (node->iova > new_node->iova) {
			TAILQ_INSERT_BEFORE(node, new_node, next);
			vq->iotlb_cache_nr++;
			goto unlock;
		}
	}

	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
	vq->iotlb_cache_nr++;

unlock:
	vhost_user_iotlb_pending_remove(vq, iova, size, perm);

	rte_rwlock_write_unlock(&vq->iotlb_lock);
}

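/*
 * Invalidate cache entries intersecting the range [iova, iova + size).
 */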
void
vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
					uint64_t iova, uint64_t size)
{
	struct vhost_iotlb_entry *node, *temp_node;

	if (unlikely(!size))
		return;

	rte_rwlock_write_lock(&vq->iotlb_lock);

	TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
		/* Sorted list */
		if (unlikely(iova + size < node->iova))
			break;

		if (iova < node->iova + node->size) {
			TAILQ_REMOVE(&vq->iotlb_list, node, next);
			rte_mempool_put(vq->iotlb_pool, node);
			vq->iotlb_cache_nr--;
		}
	}

	rte_rwlock_write_unlock(&vq->iotlb_lock);
}

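/*
 * Translate a guest IOVA into a host user virtual address with the
 * requested permissions. On return, *size is reduced if only part of the
 * requested range is mapped. Returns 0 when the IOVA is not in the cache
 * or the cached permissions are insufficient.
 */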
uint64_t
vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
						uint64_t *size, uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	uint64_t offset, vva = 0, mapped = 0;

	if (unlikely(!*size))
		goto out;

	TAILQ_FOREACH(node, &vq->iotlb_list, next) {
		/* List sorted by iova */
		if (unlikely(iova < node->iova))
			break;

		if (iova >= node->iova + node->size)
			continue;

		if (unlikely((perm & node->perm) != perm)) {
			vva = 0;
			break;
		}

		offset = iova - node->iova;
		if (!vva)
			vva = node->uaddr + offset;

		mapped += node->size - offset;
		iova = node->iova + node->size;

		if (mapped >= *size)
			break;
	}

out:
	/* Only part of the requested chunk is mapped */
	if (unlikely(mapped < *size))
		*size = mapped;

	return vva;
}

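/*
 * Flush both the IOTLB cache and the pending-miss list.
 */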
void
vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq)
{
	vhost_user_iotlb_cache_remove_all(vq);
	vhost_user_iotlb_pending_remove_all(vq);
}

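/*
 * Initialize the IOTLB cache of a virtqueue: set up the locks and lists,
 * and create the backing mempool on the NUMA node the virtqueue is
 * allocated on. Safe to call again on an already initialized virtqueue,
 * in which case existing entries are flushed and the pool is recreated.
 * Returns 0 on success, -1 on mempool creation failure.
 */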
int
vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
	int socket = 0;

	if (vq->iotlb_pool) {
		/*
		 * The cache has already been initialized,
		 * just drop all cached and pending entries.
		 */
		vhost_user_iotlb_flush_all(vq);
	}

#ifdef RTE_LIBRTE_VHOST_NUMA
	if (get_mempolicy(&socket, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR) != 0)
		socket = 0;
#endif

	rte_rwlock_init(&vq->iotlb_lock);
	rte_rwlock_init(&vq->iotlb_pending_lock);

	TAILQ_INIT(&vq->iotlb_list);
	TAILQ_INIT(&vq->iotlb_pending_list);

	snprintf(pool_name, sizeof(pool_name), "iotlb_%u_%d_%d",
			getpid(), dev->vid, vq_index);
	VHOST_LOG_CONFIG(DEBUG, "IOTLB cache name: %s\n", pool_name);

	/* If already created, free it and recreate */
	vq->iotlb_pool = rte_mempool_lookup(pool_name);
	if (vq->iotlb_pool)
		rte_mempool_free(vq->iotlb_pool);

	vq->iotlb_pool = rte_mempool_create(pool_name,
			IOTLB_CACHE_SIZE, sizeof(struct vhost_iotlb_entry), 0,
			0, 0, NULL, NULL, NULL, socket,
			MEMPOOL_F_NO_CACHE_ALIGN |
			MEMPOOL_F_SP_PUT);
	if (!vq->iotlb_pool) {
		VHOST_LOG_CONFIG(ERR,
				"Failed to create IOTLB cache pool (%s)\n",
				pool_name);
		return -1;
	}

	vq->iotlb_cache_nr = 0;

	return 0;
}