examples/vhost/virtio_net.c (dpdk.git, commit 64bf3d19ffb0)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_vhost.h>

#include "main.h"

/*
 * A very simple vhost-user net driver implementation, with no extra
 * features enabled, such as TSO or mergeable Rx buffers.
 */

void
vs_vhost_net_setup(struct vhost_dev *dev)
{
        uint16_t i;
        int vid = dev->vid;
        struct vhost_queue *queue;

        RTE_LOG(INFO, VHOST_CONFIG,
                "setting builtin vhost-user net driver\n");

        rte_vhost_get_negotiated_features(vid, &dev->features);
        /* The virtio-net header size depends on the mrg-rxbuf feature */
        if (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
                dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        else
                dev->hdr_len = sizeof(struct virtio_net_hdr);

        rte_vhost_get_mem_table(vid, &dev->mem);

        dev->nr_vrings = rte_vhost_get_vring_num(vid);
        for (i = 0; i < dev->nr_vrings; i++) {
                queue = &dev->queues[i];

                queue->last_used_idx  = 0;
                queue->last_avail_idx = 0;
                rte_vhost_get_vhost_vring(vid, i, &queue->vr);
        }
}

void
vs_vhost_net_remove(struct vhost_dev *dev)
{
        free(dev->mem);
}

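/*
 * Usage sketch (illustrative only; not part of the original example): how
 * the setup/remove pair above is typically wired into the vhost-user device
 * lifecycle via the new_device/destroy_device callbacks.  vdev_alloc() and
 * vdev_release() are hypothetical helpers standing in for the application's
 * own vhost_dev bookkeeping.
 */
#ifdef VS_USAGE_SKETCH
static int
sketch_new_device(int vid)
{
        struct vhost_dev *vdev = vdev_alloc(vid);       /* hypothetical */

        if (vdev == NULL)
                return -1;

        /* Snapshot the negotiated features, mem table and vrings. */
        vs_vhost_net_setup(vdev);
        return 0;
}

static void
sketch_destroy_device(int vid)
{
        struct vhost_dev *vdev = vdev_release(vid);     /* hypothetical */

        if (vdev != NULL)
                vs_vhost_net_remove(vdev);
}

static const struct vhost_device_ops sketch_ops = {
        .new_device     = sketch_new_device,
        .destroy_device = sketch_destroy_device,
};
/*
 * Registered once per vhost-user socket, e.g.:
 *     rte_vhost_driver_callback_register("/tmp/vhost.sock", &sketch_ops);
 */
#endif /* VS_USAGE_SKETCH */
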
static __rte_always_inline int
enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
            struct rte_mbuf *m, uint16_t desc_idx)
{
        uint32_t desc_avail, desc_offset;
        uint64_t desc_chunck_len;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
        struct vring_desc *desc;
        uint64_t desc_addr, desc_gaddr;
        struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
        /* A counter to guard against a looping desc chain */
        uint16_t nr_desc = 1;

        desc = &vr->desc[desc_idx];
        desc_chunck_len = desc->len;
        desc_gaddr = desc->addr;
        desc_addr = rte_vhost_va_from_guest_pa(
                        dev->mem, desc_gaddr, &desc_chunck_len);
        /*
         * The check of 'desc_addr' is placed outside the 'unlikely' macro
         * to avoid a performance issue with some versions of gcc (4.8.4
         * and 5.3.0), which otherwise store the offset on the stack instead
         * of in a register.
         */
        if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
                return -1;

        rte_prefetch0((void *)(uintptr_t)desc_addr);

        /* write virtio-net header */
        if (likely(desc_chunck_len >= dev->hdr_len)) {
                *(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
                desc_offset = dev->hdr_len;
        } else {
                uint64_t len;
                uint64_t remain = dev->hdr_len;
                uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst;
                uint64_t guest_addr = desc_gaddr;

                while (remain) {
                        len = remain;
                        dst = rte_vhost_va_from_guest_pa(dev->mem,
                                        guest_addr, &len);
                        if (unlikely(!dst || !len))
                                return -1;

                        rte_memcpy((void *)(uintptr_t)dst,
                                        (void *)(uintptr_t)src,
                                        len);

                        remain -= len;
                        guest_addr += len;
                        src += len;
                }

                desc_chunck_len = desc->len - dev->hdr_len;
                desc_gaddr += dev->hdr_len;
                desc_addr = rte_vhost_va_from_guest_pa(
                                dev->mem, desc_gaddr,
                                &desc_chunck_len);
                if (unlikely(!desc_addr))
                        return -1;

                desc_offset = 0;
        }

        desc_avail  = desc->len - dev->hdr_len;

        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;
        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current mbuf, fetch next */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }

                /* done with current desc buf, fetch next */
                if (desc_avail == 0) {
                        if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
                                /* not enough room in the vring buffer */
                                return -1;
                        }
                        if (unlikely(desc->next >= vr->size ||
                                     ++nr_desc > vr->size))
                                return -1;

                        desc = &vr->desc[desc->next];
                        desc_chunck_len = desc->len;
                        desc_gaddr = desc->addr;
                        desc_addr = rte_vhost_va_from_guest_pa(
                                        dev->mem, desc_gaddr, &desc_chunck_len);
                        if (unlikely(!desc_addr))
                                return -1;

                        desc_offset = 0;
                        desc_avail  = desc->len;
                } else if (unlikely(desc_chunck_len == 0)) {
                        /* current host-contiguous chunk exhausted, remap */
                        desc_chunck_len = desc_avail;
                        desc_gaddr += desc_offset;
                        desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
                                        desc_gaddr,
                                        &desc_chunck_len);
                        if (unlikely(!desc_addr))
                                return -1;

                        desc_offset = 0;
                }

                cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
                rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
                        rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
                        cpy_len);

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                desc_avail  -= cpy_len;
                desc_offset += cpy_len;
                desc_chunck_len -= cpy_len;
        }

        return 0;
}

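/*
 * Reference sketch (not part of the original file): the chunked-copy
 * pattern used above, factored out for clarity.  A buffer that is
 * contiguous in guest-physical memory may map to several chunks of host
 * virtual memory, so every rte_memcpy() must be bounded by the chunk
 * length that rte_vhost_va_from_guest_pa() reports back.
 */
#ifdef VS_USAGE_SKETCH
static int
sketch_copy_to_guest(struct rte_vhost_memory *mem, uint64_t guest_addr,
                     const void *src, uint64_t size)
{
        uint64_t chunk, dst;

        while (size) {
                chunk = size;
                dst = rte_vhost_va_from_guest_pa(mem, guest_addr, &chunk);
                if (unlikely(!dst || !chunk))
                        return -1;

                rte_memcpy((void *)(uintptr_t)dst, src, chunk);

                guest_addr += chunk;
                src = (const char *)src + chunk;
                size -= chunk;
        }

        return 0;
}
#endif /* VS_USAGE_SKETCH */
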
uint16_t
vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
                struct rte_mbuf **pkts, uint32_t count)
{
        struct vhost_queue *queue;
        struct rte_vhost_vring *vr;
        uint16_t avail_idx, free_entries, start_idx;
        uint16_t desc_indexes[MAX_PKT_BURST];
        uint16_t used_idx;
        uint32_t i;

        queue = &dev->queues[queue_id];
        vr    = &queue->vr;

        avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
        start_idx = queue->last_used_idx;
        free_entries = avail_idx - start_idx;
        count = RTE_MIN(count, free_entries);
        count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
        if (count == 0)
                return 0;

        /*
         * Retrieve all of the desc indexes first to avoid caching issues.
         * The vring size is a power of two, so masking with (size - 1)
         * wraps the free-running index onto the ring.
         */
        rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
        for (i = 0; i < count; i++) {
                used_idx = (start_idx + i) & (vr->size - 1);
                desc_indexes[i] = vr->avail->ring[used_idx];
                vr->used->ring[used_idx].id = desc_indexes[i];
                vr->used->ring[used_idx].len = pkts[i]->pkt_len +
                                               dev->hdr_len;
        }

        rte_prefetch0(&vr->desc[desc_indexes[0]]);
        for (i = 0; i < count; i++) {
                uint16_t desc_idx = desc_indexes[i];
                int err;

                err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
                if (unlikely(err)) {
                        /* on failure, report only the header as written */
                        used_idx = (start_idx + i) & (vr->size - 1);
                        vr->used->ring[used_idx].len = dev->hdr_len;
                }

                if (i + 1 < count)
                        rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);
        }

        __atomic_add_fetch(&vr->used->idx, count, __ATOMIC_RELEASE);
        queue->last_used_idx += count;

        rte_vhost_vring_call(dev->vid, queue_id);

        return count;
}

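/*
 * Usage sketch (illustrative only): relaying a burst received on a NIC
 * port into the guest's Rx queue.  VIRTIO_RXQ comes from main.h; 'port'
 * and 'nic_queue' are assumed to be configured elsewhere, and rte_ethdev.h
 * to be included.
 */
#ifdef VS_USAGE_SKETCH
static void
sketch_nic_to_guest(struct vhost_dev *vdev, uint16_t port, uint16_t nic_queue)
{
        struct rte_mbuf *pkts[MAX_PKT_BURST];
        uint16_t rx_count;

        rx_count = rte_eth_rx_burst(port, nic_queue, pkts, MAX_PKT_BURST);
        if (rx_count == 0)
                return;

        vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, rx_count);

        /*
         * vs_enqueue_pkts() copies the packet data into guest memory, so
         * the mbufs always remain owned by the caller and are freed here.
         */
        while (rx_count)
                rte_pktmbuf_free(pkts[--rx_count]);
}
#endif /* VS_USAGE_SKETCH */
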
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
            struct rte_mbuf *m, uint16_t desc_idx,
            struct rte_mempool *mbuf_pool)
{
        struct vring_desc *desc;
        uint64_t desc_addr, desc_gaddr;
        uint32_t desc_avail, desc_offset;
        uint64_t desc_chunck_len;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
        struct rte_mbuf *cur = m, *prev = m;
        /* A counter to guard against a looping desc chain */
        uint32_t nr_desc = 1;

        desc = &vr->desc[desc_idx];
        if (unlikely((desc->len < dev->hdr_len) ||
                        (desc->flags & VRING_DESC_F_INDIRECT)))
                return -1;

        desc_chunck_len = desc->len;
        desc_gaddr = desc->addr;
        desc_addr = rte_vhost_va_from_guest_pa(
                        dev->mem, desc_gaddr, &desc_chunck_len);
        if (unlikely(!desc_addr))
                return -1;

        /*
         * We support neither ANY_LAYOUT nor VERSION_1, so a Tx packet
         * from the guest must have at least 2 desc buffers: the first
         * for storing the header and the rest for storing the data.
         *
         * And since we don't support TSO, we can simply skip the header.
         */
        desc = &vr->desc[desc->next];
        desc_chunck_len = desc->len;
        desc_gaddr = desc->addr;
        desc_addr = rte_vhost_va_from_guest_pa(
                        dev->mem, desc_gaddr, &desc_chunck_len);
        if (unlikely(!desc_addr))
                return -1;
        rte_prefetch0((void *)(uintptr_t)desc_addr);

        desc_offset = 0;
        desc_avail  = desc->len;
        nr_desc    += 1;

        mbuf_offset = 0;
        mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
        while (1) {
                cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
                rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
                                                   mbuf_offset),
                        (void *)((uintptr_t)(desc_addr + desc_offset)),
                        cpy_len);

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                desc_avail  -= cpy_len;
                desc_offset += cpy_len;
                desc_chunck_len -= cpy_len;

                /* This desc has been fully consumed, get the next one */
                if (desc_avail == 0) {
                        if ((desc->flags & VRING_DESC_F_NEXT) == 0)
                                break;

                        if (unlikely(desc->next >= vr->size ||
                                     ++nr_desc > vr->size))
                                return -1;
                        desc = &vr->desc[desc->next];

                        desc_chunck_len = desc->len;
                        desc_gaddr = desc->addr;
                        desc_addr = rte_vhost_va_from_guest_pa(
                                        dev->mem, desc_gaddr, &desc_chunck_len);
                        if (unlikely(!desc_addr))
                                return -1;
                        rte_prefetch0((void *)(uintptr_t)desc_addr);

                        desc_offset = 0;
                        desc_avail  = desc->len;
                } else if (unlikely(desc_chunck_len == 0)) {
                        /* current host-contiguous chunk exhausted, remap */
                        desc_chunck_len = desc_avail;
                        desc_gaddr += desc_offset;
                        desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
                                        desc_gaddr,
                                        &desc_chunck_len);
                        if (unlikely(!desc_addr))
                                return -1;

                        desc_offset = 0;
                }

                /*
                 * This mbuf is full, chain a new one to hold
                 * more data.
                 */
                if (mbuf_avail == 0) {
                        cur = rte_pktmbuf_alloc(mbuf_pool);
                        if (unlikely(cur == NULL)) {
                                RTE_LOG(ERR, VHOST_DATA,
                                        "Failed to allocate memory for mbuf.\n");
                                return -1;
                        }

                        prev->next = cur;
                        prev->data_len = mbuf_offset;
                        m->nb_segs += 1;
                        m->pkt_len += mbuf_offset;
                        prev = cur;

                        mbuf_offset = 0;
                        mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
                }
        }

        prev->data_len = mbuf_offset;
        m->pkt_len    += mbuf_offset;

        return 0;
}

uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
        struct vhost_queue *queue;
        struct rte_vhost_vring *vr;
        uint32_t desc_indexes[MAX_PKT_BURST];
        uint32_t used_idx;
        uint32_t i = 0;
        uint16_t free_entries;
        uint16_t avail_idx;

        queue = &dev->queues[queue_id];
        vr    = &queue->vr;

        free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
                        queue->last_avail_idx;
        if (free_entries == 0)
                return 0;

        /* Prefetch the available and used rings */
        avail_idx = queue->last_avail_idx & (vr->size - 1);
        used_idx  = queue->last_used_idx  & (vr->size - 1);
        rte_prefetch0(&vr->avail->ring[avail_idx]);
        rte_prefetch0(&vr->used->ring[used_idx]);

        count = RTE_MIN(count, MAX_PKT_BURST);
        count = RTE_MIN(count, free_entries);

        if (unlikely(count == 0))
                return 0;

        /*
         * Retrieve all of the head indexes first and pre-update used entries
         * to avoid caching issues.
         */
        for (i = 0; i < count; i++) {
                avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
                used_idx  = (queue->last_used_idx  + i) & (vr->size - 1);
                desc_indexes[i] = vr->avail->ring[avail_idx];

                vr->used->ring[used_idx].id  = desc_indexes[i];
                vr->used->ring[used_idx].len = 0;
        }

        /* Prefetch the first descriptor */
        rte_prefetch0(&vr->desc[desc_indexes[0]]);
        for (i = 0; i < count; i++) {
                int err;

                if (likely(i + 1 < count))
                        rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);

                pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
                if (unlikely(pkts[i] == NULL)) {
                        RTE_LOG(ERR, VHOST_DATA,
                                "Failed to allocate memory for mbuf.\n");
                        break;
                }

                err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
                if (unlikely(err)) {
                        rte_pktmbuf_free(pkts[i]);
                        break;
                }
        }

        queue->last_avail_idx += i;
        queue->last_used_idx += i;

        __atomic_add_fetch(&vr->used->idx, i, __ATOMIC_ACQ_REL);

        rte_vhost_vring_call(dev->vid, queue_id);

        return i;
}
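
/*
 * Usage sketch (illustrative only): draining the guest's Tx queue and
 * sending the packets out of a NIC port.  VIRTIO_TXQ comes from main.h;
 * 'mbuf_pool', 'port' and 'nic_queue' are assumed to be set up elsewhere,
 * and rte_ethdev.h to be included.
 */
#ifdef VS_USAGE_SKETCH
static void
sketch_guest_to_nic(struct vhost_dev *vdev, struct rte_mempool *mbuf_pool,
                    uint16_t port, uint16_t nic_queue)
{
        struct rte_mbuf *pkts[MAX_PKT_BURST];
        uint16_t rx_count, tx_count;

        rx_count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
                                   pkts, MAX_PKT_BURST);
        if (rx_count == 0)
                return;

        tx_count = rte_eth_tx_burst(port, nic_queue, pkts, rx_count);

        /* Free whatever the NIC did not accept. */
        while (tx_count < rx_count)
                rte_pktmbuf_free(pkts[tx_count++]);
}
#endif /* VS_USAGE_SKETCH */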