vhost: fix translated address not checked
[dpdk.git] / lib/librte_vhost/virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static __rte_always_inline bool
35 virtio_net_is_inorder(struct virtio_net *dev)
36 {
37         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
38 }
39
40 static bool
41 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
42 {
43         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
44 }
45
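/*
 * Flush the copies batched on the enqueue path: do the actual memcpy for
 * each element, log the written guest areas (dirty-page tracking for live
 * migration) and reset the batch counter.
 */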
46 static inline void
47 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
48 {
49         struct batch_copy_elem *elem = vq->batch_copy_elems;
50         uint16_t count = vq->batch_copy_nb_elems;
51         int i;
52
53         for (i = 0; i < count; i++) {
54                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
55                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
56                                            elem[i].len);
57                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
58         }
59
60         vq->batch_copy_nb_elems = 0;
61 }
62
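/*
 * Flush the copies batched on the dequeue path. No dirty-page logging is
 * needed here since the destination is a host mbuf.
 */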
63 static inline void
64 do_data_copy_dequeue(struct vhost_virtqueue *vq)
65 {
66         struct batch_copy_elem *elem = vq->batch_copy_elems;
67         uint16_t count = vq->batch_copy_nb_elems;
68         int i;
69
70         for (i = 0; i < count; i++)
71                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
72
73         vq->batch_copy_nb_elems = 0;
74 }
75
76 static __rte_always_inline void
77 do_flush_shadow_used_ring_split(struct virtio_net *dev,
78                         struct vhost_virtqueue *vq,
79                         uint16_t to, uint16_t from, uint16_t size)
80 {
81         rte_memcpy(&vq->used->ring[to],
82                         &vq->shadow_used_split[from],
83                         size * sizeof(struct vring_used_elem));
84         vhost_log_cache_used_vring(dev, vq,
85                         offsetof(struct vring_used, ring[to]),
86                         size * sizeof(struct vring_used_elem));
87 }
88
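/*
 * Write the shadowed used entries back to the split used ring (in two chunks
 * when wrapping past the ring end), then publish the new used index with a
 * release store so the guest sees the entries before the index update.
 */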
89 static __rte_always_inline void
90 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
91 {
92         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
93
94         if (used_idx + vq->shadow_used_idx <= vq->size) {
95                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
96                                           vq->shadow_used_idx);
97         } else {
98                 uint16_t size;
99
100                 /* update used ring interval [used_idx, vq->size) */
101                 size = vq->size - used_idx;
102                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
103
104                 /* update the remaining used ring interval [0, shadow_used_idx - size) */
105                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
106                                           vq->shadow_used_idx - size);
107         }
108         vq->last_used_idx += vq->shadow_used_idx;
109
110         vhost_log_cache_sync(dev, vq);
111
112         __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
113                            __ATOMIC_RELEASE);
114         vq->shadow_used_idx = 0;
115         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
116                 sizeof(vq->used->idx));
117 }
118
119 static __rte_always_inline void
120 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
121                          uint16_t desc_idx, uint32_t len)
122 {
123         uint16_t i = vq->shadow_used_idx++;
124
125         vq->shadow_used_split[i].id  = desc_idx;
126         vq->shadow_used_split[i].len = len;
127 }
128
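/*
 * Flush the shadowed used descriptors to the packed ring: ids and lengths
 * first, then the flags after a write barrier, with the head descriptor's
 * flags written last so the guest cannot observe a partially updated chain.
 */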
129 static __rte_always_inline void
130 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
131                                   struct vhost_virtqueue *vq)
132 {
133         int i;
134         uint16_t used_idx = vq->last_used_idx;
135         uint16_t head_idx = vq->last_used_idx;
136         uint16_t head_flags = 0;
137
138         /* Split the loop in two to save memory barriers */
139         for (i = 0; i < vq->shadow_used_idx; i++) {
140                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
141                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
142
143                 used_idx += vq->shadow_used_packed[i].count;
144                 if (used_idx >= vq->size)
145                         used_idx -= vq->size;
146         }
147
148         rte_smp_wmb();
149
150         for (i = 0; i < vq->shadow_used_idx; i++) {
151                 uint16_t flags;
152
153                 if (vq->shadow_used_packed[i].len)
154                         flags = VRING_DESC_F_WRITE;
155                 else
156                         flags = 0;
157
158                 if (vq->used_wrap_counter) {
159                         flags |= VRING_DESC_F_USED;
160                         flags |= VRING_DESC_F_AVAIL;
161                 } else {
162                         flags &= ~VRING_DESC_F_USED;
163                         flags &= ~VRING_DESC_F_AVAIL;
164                 }
165
166                 if (i > 0) {
167                         vq->desc_packed[vq->last_used_idx].flags = flags;
168
169                         vhost_log_cache_used_vring(dev, vq,
170                                         vq->last_used_idx *
171                                         sizeof(struct vring_packed_desc),
172                                         sizeof(struct vring_packed_desc));
173                 } else {
174                         head_idx = vq->last_used_idx;
175                         head_flags = flags;
176                 }
177
178                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
179         }
180
181         vq->desc_packed[head_idx].flags = head_flags;
182
183         vhost_log_cache_used_vring(dev, vq,
184                                 head_idx *
185                                 sizeof(struct vring_packed_desc),
186                                 sizeof(struct vring_packed_desc));
187
188         vq->shadow_used_idx = 0;
189         vhost_log_cache_sync(dev, vq);
190 }
191
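/*
 * Complete the deferred dequeue update: write the shadowed descriptor id
 * and, after a write barrier, its flags.
 */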
192 static __rte_always_inline void
193 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
194                                   struct vhost_virtqueue *vq)
195 {
196         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
197
198         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
199         rte_smp_wmb();
200         vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
201
202         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
203                                    sizeof(struct vring_packed_desc),
204                                    sizeof(struct vring_packed_desc));
205         vq->shadow_used_idx = 0;
206         vhost_log_cache_sync(dev, vq);
207 }
208
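/*
 * Flush any pending shadow updates, then mark a full enqueue batch as used:
 * ids and lengths first, flags after a write barrier, and log the updated
 * descriptors.
 */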
209 static __rte_always_inline void
210 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
211                                  struct vhost_virtqueue *vq,
212                                  uint64_t *lens,
213                                  uint16_t *ids)
214 {
215         uint16_t i;
216         uint16_t flags;
217
218         if (vq->shadow_used_idx) {
219                 do_data_copy_enqueue(dev, vq);
220                 vhost_flush_enqueue_shadow_packed(dev, vq);
221         }
222
223         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
224
225         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
226                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
227                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
228         }
229
230         rte_smp_wmb();
231
232         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
233                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
234
235         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
236                                    sizeof(struct vring_packed_desc),
237                                    sizeof(struct vring_packed_desc) *
238                                    PACKED_BATCH_SIZE);
239         vhost_log_cache_sync(dev, vq);
240
241         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
242 }
243
244 static __rte_always_inline void
245 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
246                                           uint16_t id)
247 {
248         vq->shadow_used_packed[0].id = id;
249
250         if (!vq->shadow_used_idx) {
251                 vq->shadow_last_used_idx = vq->last_used_idx;
252                 vq->shadow_used_packed[0].flags =
253                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
254                 vq->shadow_used_packed[0].len = 0;
255                 vq->shadow_used_packed[0].count = 1;
256                 vq->shadow_used_idx++;
257         }
258
259         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
260 }
261
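/*
 * Mark a dequeued batch as used: the first descriptor is deferred to the
 * shadow ring (if none is pending yet), the others are written directly,
 * flags last after a write barrier.
 */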
262 static __rte_always_inline void
263 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
264                                   struct vhost_virtqueue *vq,
265                                   uint16_t *ids)
266 {
267         uint16_t flags;
268         uint16_t i;
269         uint16_t begin;
270
271         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
272
273         if (!vq->shadow_used_idx) {
274                 vq->shadow_last_used_idx = vq->last_used_idx;
275                 vq->shadow_used_packed[0].id  = ids[0];
276                 vq->shadow_used_packed[0].len = 0;
277                 vq->shadow_used_packed[0].count = 1;
278                 vq->shadow_used_packed[0].flags = flags;
279                 vq->shadow_used_idx++;
280                 begin = 1;
281         } else
282                 begin = 0;
283
284         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
285                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
286                 vq->desc_packed[vq->last_used_idx + i].len = 0;
287         }
288
289         rte_smp_wmb();
290         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
291                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
292
293         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
294                                    sizeof(struct vring_packed_desc),
295                                    sizeof(struct vring_packed_desc) *
296                                    PACKED_BATCH_SIZE);
297         vhost_log_cache_sync(dev, vq);
298
299         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
300 }
301
302 static __rte_always_inline void
303 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
304                                    uint16_t buf_id,
305                                    uint16_t count)
306 {
307         uint16_t flags;
308
309         flags = vq->desc_packed[vq->last_used_idx].flags;
310         if (vq->used_wrap_counter) {
311                 flags |= VRING_DESC_F_USED;
312                 flags |= VRING_DESC_F_AVAIL;
313         } else {
314                 flags &= ~VRING_DESC_F_USED;
315                 flags &= ~VRING_DESC_F_AVAIL;
316         }
317
318         if (!vq->shadow_used_idx) {
319                 vq->shadow_last_used_idx = vq->last_used_idx;
320
321                 vq->shadow_used_packed[0].id  = buf_id;
322                 vq->shadow_used_packed[0].len = 0;
323                 vq->shadow_used_packed[0].flags = flags;
324                 vq->shadow_used_idx++;
325         } else {
326                 vq->desc_packed[vq->last_used_idx].id = buf_id;
327                 vq->desc_packed[vq->last_used_idx].len = 0;
328                 vq->desc_packed[vq->last_used_idx].flags = flags;
329         }
330
331         vq_inc_last_used_packed(vq, count);
332 }
333
334 static __rte_always_inline void
335 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
336                                            uint16_t buf_id,
337                                            uint16_t count)
338 {
339         uint16_t flags;
340
341         vq->shadow_used_packed[0].id = buf_id;
342
343         flags = vq->desc_packed[vq->last_used_idx].flags;
344         if (vq->used_wrap_counter) {
345                 flags |= VRING_DESC_F_USED;
346                 flags |= VRING_DESC_F_AVAIL;
347         } else {
348                 flags &= ~VRING_DESC_F_USED;
349                 flags &= ~VRING_DESC_F_AVAIL;
350         }
351
352         if (!vq->shadow_used_idx) {
353                 vq->shadow_last_used_idx = vq->last_used_idx;
354                 vq->shadow_used_packed[0].len = 0;
355                 vq->shadow_used_packed[0].flags = flags;
356                 vq->shadow_used_idx++;
357         }
358
359         vq_inc_last_used_packed(vq, count);
360 }
361
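/*
 * Record the used-ring updates of a single enqueued packet in the shadow
 * ring and flush them once a batch-aligned boundary is reached.
 */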
362 static __rte_always_inline void
363 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
364                                    struct vhost_virtqueue *vq,
365                                    uint32_t len[],
366                                    uint16_t id[],
367                                    uint16_t count[],
368                                    uint16_t num_buffers)
369 {
370         uint16_t i;
371         for (i = 0; i < num_buffers; i++) {
372                 /* flush the enqueue shadow ring only at batch-size aligned boundaries */
373                 if (!vq->shadow_used_idx)
374                         vq->shadow_aligned_idx = vq->last_used_idx &
375                                 PACKED_BATCH_MASK;
376                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
377                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
378                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
379                 vq->shadow_aligned_idx += count[i];
380                 vq->shadow_used_idx++;
381         }
382
383         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
384                 do_data_copy_enqueue(dev, vq);
385                 vhost_flush_enqueue_shadow_packed(dev, vq);
386         }
387 }
388
389 /* skip the write when the value is unchanged, to lessen cache pressure */
390 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
391         if ((var) != (val))                     \
392                 (var) = (val);                  \
393 } while (0)
394
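/*
 * Translate the mbuf offload flags (L4 checksum, TSO/UFO) into the
 * virtio-net header seen by the guest.
 */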
395 static __rte_always_inline void
396 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
397 {
398         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
399
400         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
401                 csum_l4 |= PKT_TX_TCP_CKSUM;
402
403         if (csum_l4) {
404                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
405                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
406
407                 switch (csum_l4) {
408                 case PKT_TX_TCP_CKSUM:
409                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
410                                                 cksum));
411                         break;
412                 case PKT_TX_UDP_CKSUM:
413                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
414                                                 dgram_cksum));
415                         break;
416                 case PKT_TX_SCTP_CKSUM:
417                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
418                                                 cksum));
419                         break;
420                 }
421         } else {
422                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
423                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
424                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
425         }
426
427         /* The virtio-net header has no IP csum offload flag, so compute it here */
428         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
429                 struct rte_ipv4_hdr *ipv4_hdr;
430
431                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
432                                                    m_buf->l2_len);
433                 ipv4_hdr->hdr_checksum = 0;
434                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
435         }
436
437         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
438                 if (m_buf->ol_flags & PKT_TX_IPV4)
439                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
440                 else
441                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
442                 net_hdr->gso_size = m_buf->tso_segsz;
443                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
444                                         + m_buf->l4_len;
445         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
446                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
447                 net_hdr->gso_size = m_buf->tso_segsz;
448                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
449                         m_buf->l4_len;
450         } else {
451                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
452                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
453                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
454         }
455 }
456
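/*
 * Translate one guest descriptor into host virtual address chunks and append
 * them to the buffer vector; a descriptor is split into several chunks when
 * its IOVA range is not contiguous in host virtual memory.
 */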
457 static __rte_always_inline int
458 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
459                 struct buf_vector *buf_vec, uint16_t *vec_idx,
460                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
461 {
462         uint16_t vec_id = *vec_idx;
463
464         while (desc_len) {
465                 uint64_t desc_addr;
466                 uint64_t desc_chunck_len = desc_len;
467
468                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
469                         return -1;
470
471                 desc_addr = vhost_iova_to_vva(dev, vq,
472                                 desc_iova,
473                                 &desc_chunck_len,
474                                 perm);
475                 if (unlikely(!desc_addr))
476                         return -1;
477
478                 rte_prefetch0((void *)(uintptr_t)desc_addr);
479
480                 buf_vec[vec_id].buf_iova = desc_iova;
481                 buf_vec[vec_id].buf_addr = desc_addr;
482                 buf_vec[vec_id].buf_len  = desc_chunck_len;
483
484                 desc_len -= desc_chunck_len;
485                 desc_iova += desc_chunck_len;
486                 vec_id++;
487         }
488         *vec_idx = vec_id;
489
490         return 0;
491 }
492
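/*
 * Walk a split-ring descriptor chain starting at the given avail index,
 * following indirect tables if present, and collect the translated buffers
 * into buf_vec together with the chain head and its total length.
 */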
493 static __rte_always_inline int
494 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
495                          uint32_t avail_idx, uint16_t *vec_idx,
496                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
497                          uint32_t *desc_chain_len, uint8_t perm)
498 {
499         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
500         uint16_t vec_id = *vec_idx;
501         uint32_t len    = 0;
502         uint64_t dlen;
503         uint32_t nr_descs = vq->size;
504         uint32_t cnt    = 0;
505         struct vring_desc *descs = vq->desc;
506         struct vring_desc *idesc = NULL;
507
508         if (unlikely(idx >= vq->size))
509                 return -1;
510
511         *desc_chain_head = idx;
512
513         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
514                 dlen = vq->desc[idx].len;
515                 nr_descs = dlen / sizeof(struct vring_desc);
516                 if (unlikely(nr_descs > vq->size))
517                         return -1;
518
519                 descs = (struct vring_desc *)(uintptr_t)
520                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
521                                                 &dlen,
522                                                 VHOST_ACCESS_RO);
523                 if (unlikely(!descs))
524                         return -1;
525
526                 if (unlikely(dlen < vq->desc[idx].len)) {
527                         /*
528                          * The indirect desc table is not contiguous
529                          * in the process VA space, so we have to copy it.
530                          */
531                         idesc = vhost_alloc_copy_ind_table(dev, vq,
532                                         vq->desc[idx].addr, vq->desc[idx].len);
533                         if (unlikely(!idesc))
534                                 return -1;
535
536                         descs = idesc;
537                 }
538
539                 idx = 0;
540         }
541
542         while (1) {
543                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
544                         free_ind_table(idesc);
545                         return -1;
546                 }
547
548                 len += descs[idx].len;
549
550                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
551                                                 descs[idx].addr, descs[idx].len,
552                                                 perm))) {
553                         free_ind_table(idesc);
554                         return -1;
555                 }
556
557                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
558                         break;
559
560                 idx = descs[idx].next;
561         }
562
563         *desc_chain_len = len;
564         *vec_idx = vec_id;
565
566         if (unlikely(!!idesc))
567                 free_ind_table(idesc);
568
569         return 0;
570 }
571
572 /*
573  * Returns -1 on failure, 0 on success
574  */
575 static inline int
576 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
577                                 uint32_t size, struct buf_vector *buf_vec,
578                                 uint16_t *num_buffers, uint16_t avail_head,
579                                 uint16_t *nr_vec)
580 {
581         uint16_t cur_idx;
582         uint16_t vec_idx = 0;
583         uint16_t max_tries, tries = 0;
584
585         uint16_t head_idx = 0;
586         uint32_t len = 0;
587
588         *num_buffers = 0;
589         cur_idx  = vq->last_avail_idx;
590
591         if (rxvq_is_mergeable(dev))
592                 max_tries = vq->size - 1;
593         else
594                 max_tries = 1;
595
596         while (size > 0) {
597                 if (unlikely(cur_idx == avail_head))
598                         return -1;
599                 /*
600                  * If we have tried all available ring items and still
601                  * cannot get enough buffers, something abnormal has
602                  * happened.
603                  */
604                 if (unlikely(++tries > max_tries))
605                         return -1;
606
607                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
608                                                 &vec_idx, buf_vec,
609                                                 &head_idx, &len,
610                                                 VHOST_ACCESS_RW) < 0))
611                         return -1;
612                 len = RTE_MIN(len, size);
613                 update_shadow_used_ring_split(vq, head_idx, len);
614                 size -= len;
615
616                 cur_idx++;
617                 *num_buffers += 1;
618         }
619
620         *nr_vec = vec_idx;
621
622         return 0;
623 }
624
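/*
 * Map the descriptors of a packed-ring indirect table into buf_vec, copying
 * the table first when it is not contiguous in host virtual memory.
 */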
625 static __rte_always_inline int
626 fill_vec_buf_packed_indirect(struct virtio_net *dev,
627                         struct vhost_virtqueue *vq,
628                         struct vring_packed_desc *desc, uint16_t *vec_idx,
629                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
630 {
631         uint16_t i;
632         uint32_t nr_descs;
633         uint16_t vec_id = *vec_idx;
634         uint64_t dlen;
635         struct vring_packed_desc *descs, *idescs = NULL;
636
637         dlen = desc->len;
638         descs = (struct vring_packed_desc *)(uintptr_t)
639                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
640         if (unlikely(!descs))
641                 return -1;
642
643         if (unlikely(dlen < desc->len)) {
644                 /*
645                  * The indirect desc table is not contiguous
646                  * in the process VA space, so we have to copy it.
647                  */
648                 idescs = vhost_alloc_copy_ind_table(dev,
649                                 vq, desc->addr, desc->len);
650                 if (unlikely(!idescs))
651                         return -1;
652
653                 descs = idescs;
654         }
655
656         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
657         if (unlikely(nr_descs >= vq->size)) {
658                 free_ind_table(idescs);
659                 return -1;
660         }
661
662         for (i = 0; i < nr_descs; i++) {
663                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
664                         free_ind_table(idescs);
665                         return -1;
666                 }
667
668                 *len += descs[i].len;
669                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
670                                                 descs[i].addr, descs[i].len,
671                                                 perm)))
672                         return -1;
673         }
674         *vec_idx = vec_id;
675
676         if (unlikely(!!idescs))
677                 free_ind_table(idescs);
678
679         return 0;
680 }
681
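/*
 * Walk a packed-ring descriptor chain starting at avail_idx and collect the
 * translated buffers into buf_vec, returning the buffer id, the number of
 * descriptors consumed and the total buffer length.
 */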
682 static __rte_always_inline int
683 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
684                                 uint16_t avail_idx, uint16_t *desc_count,
685                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
686                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
687 {
688         bool wrap_counter = vq->avail_wrap_counter;
689         struct vring_packed_desc *descs = vq->desc_packed;
690         uint16_t vec_id = *vec_idx;
691
692         if (avail_idx < vq->last_avail_idx)
693                 wrap_counter ^= 1;
694
695         /*
696          * Perform a load-acquire barrier in desc_is_avail to
697          * enforce the ordering between desc flags and desc
698          * content.
699          */
700         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
701                 return -1;
702
703         *desc_count = 0;
704         *len = 0;
705
706         while (1) {
707                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
708                         return -1;
709
710                 if (unlikely(*desc_count >= vq->size))
711                         return -1;
712
713                 *desc_count += 1;
714                 *buf_id = descs[avail_idx].id;
715
716                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
717                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
718                                                         &descs[avail_idx],
719                                                         &vec_id, buf_vec,
720                                                         len, perm) < 0))
721                                 return -1;
722                 } else {
723                         *len += descs[avail_idx].len;
724
725                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
726                                                         descs[avail_idx].addr,
727                                                         descs[avail_idx].len,
728                                                         perm)))
729                                 return -1;
730                 }
731
732                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
733                         break;
734
735                 if (++avail_idx >= vq->size) {
736                         avail_idx -= vq->size;
737                         wrap_counter ^= 1;
738                 }
739         }
740
741         *vec_idx = vec_id;
742
743         return 0;
744 }
745
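/*
 * Copy the virtio-net header into the guest buffers chunk by chunk when it
 * does not fit in a single contiguous descriptor mapping.
 */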
746 static __rte_noinline void
747 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
748                 struct buf_vector *buf_vec,
749                 struct virtio_net_hdr_mrg_rxbuf *hdr)
750 {
751         uint64_t len;
752         uint64_t remain = dev->vhost_hlen;
753         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
754         uint64_t iova = buf_vec->buf_iova;
755
756         while (remain) {
757                 len = RTE_MIN(remain,
758                                 buf_vec->buf_len);
759                 dst = buf_vec->buf_addr;
760                 rte_memcpy((void *)(uintptr_t)dst,
761                                 (void *)(uintptr_t)src,
762                                 len);
763
764                 PRINT_PACKET(dev, (uintptr_t)dst,
765                                 (uint32_t)len, 0);
766                 vhost_log_cache_write_iova(dev, vq,
767                                 iova, len);
768
769                 remain -= len;
770                 iova += len;
771                 src += len;
772                 buf_vec++;
773         }
774 }
775
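/*
 * Copy one mbuf chain into the guest buffers described by buf_vec, writing
 * the virtio-net header first. Small copies are batched and flushed later
 * by do_data_copy_enqueue().
 */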
776 static __rte_always_inline int
777 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
778                             struct rte_mbuf *m, struct buf_vector *buf_vec,
779                             uint16_t nr_vec, uint16_t num_buffers)
780 {
781         uint32_t vec_idx = 0;
782         uint32_t mbuf_offset, mbuf_avail;
783         uint32_t buf_offset, buf_avail;
784         uint64_t buf_addr, buf_iova, buf_len;
785         uint32_t cpy_len;
786         uint64_t hdr_addr;
787         struct rte_mbuf *hdr_mbuf;
788         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
789         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
790         int error = 0;
791
792         if (unlikely(m == NULL)) {
793                 error = -1;
794                 goto out;
795         }
796
797         buf_addr = buf_vec[vec_idx].buf_addr;
798         buf_iova = buf_vec[vec_idx].buf_iova;
799         buf_len = buf_vec[vec_idx].buf_len;
800
801         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
802                 error = -1;
803                 goto out;
804         }
805
806         hdr_mbuf = m;
807         hdr_addr = buf_addr;
808         if (unlikely(buf_len < dev->vhost_hlen))
809                 hdr = &tmp_hdr;
810         else
811                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
812
813         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
814                 dev->vid, num_buffers);
815
816         if (unlikely(buf_len < dev->vhost_hlen)) {
817                 buf_offset = dev->vhost_hlen - buf_len;
818                 vec_idx++;
819                 buf_addr = buf_vec[vec_idx].buf_addr;
820                 buf_iova = buf_vec[vec_idx].buf_iova;
821                 buf_len = buf_vec[vec_idx].buf_len;
822                 buf_avail = buf_len - buf_offset;
823         } else {
824                 buf_offset = dev->vhost_hlen;
825                 buf_avail = buf_len - dev->vhost_hlen;
826         }
827
828         mbuf_avail  = rte_pktmbuf_data_len(m);
829         mbuf_offset = 0;
830         while (mbuf_avail != 0 || m->next != NULL) {
831                 /* done with current buf, get the next one */
832                 if (buf_avail == 0) {
833                         vec_idx++;
834                         if (unlikely(vec_idx >= nr_vec)) {
835                                 error = -1;
836                                 goto out;
837                         }
838
839                         buf_addr = buf_vec[vec_idx].buf_addr;
840                         buf_iova = buf_vec[vec_idx].buf_iova;
841                         buf_len = buf_vec[vec_idx].buf_len;
842
843                         buf_offset = 0;
844                         buf_avail  = buf_len;
845                 }
846
847                 /* done with current mbuf, get the next one */
848                 if (mbuf_avail == 0) {
849                         m = m->next;
850
851                         mbuf_offset = 0;
852                         mbuf_avail  = rte_pktmbuf_data_len(m);
853                 }
854
855                 if (hdr_addr) {
856                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
857                         if (rxvq_is_mergeable(dev))
858                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
859                                                 num_buffers);
860
861                         if (unlikely(hdr == &tmp_hdr)) {
862                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
863                         } else {
864                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
865                                                 dev->vhost_hlen, 0);
866                                 vhost_log_cache_write_iova(dev, vq,
867                                                 buf_vec[0].buf_iova,
868                                                 dev->vhost_hlen);
869                         }
870
871                         hdr_addr = 0;
872                 }
873
874                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
875
876                 if (likely(cpy_len > MAX_BATCH_LEN ||
877                                         vq->batch_copy_nb_elems >= vq->size)) {
878                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
879                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
880                                 cpy_len);
881                         vhost_log_cache_write_iova(dev, vq,
882                                                    buf_iova + buf_offset,
883                                                    cpy_len);
884                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
885                                 cpy_len, 0);
886                 } else {
887                         batch_copy[vq->batch_copy_nb_elems].dst =
888                                 (void *)((uintptr_t)(buf_addr + buf_offset));
889                         batch_copy[vq->batch_copy_nb_elems].src =
890                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
891                         batch_copy[vq->batch_copy_nb_elems].log_addr =
892                                 buf_iova + buf_offset;
893                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
894                         vq->batch_copy_nb_elems++;
895                 }
896
897                 mbuf_avail  -= cpy_len;
898                 mbuf_offset += cpy_len;
899                 buf_avail  -= cpy_len;
900                 buf_offset += cpy_len;
901         }
902
903 out:
904
905         return error;
906 }
907
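/*
 * Reserve enough packed-ring descriptors for one packet, copy it in, and
 * record the used-ring updates in the shadow ring.
 */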
908 static __rte_always_inline int
909 vhost_enqueue_single_packed(struct virtio_net *dev,
910                             struct vhost_virtqueue *vq,
911                             struct rte_mbuf *pkt,
912                             struct buf_vector *buf_vec,
913                             uint16_t *nr_descs)
914 {
915         uint16_t nr_vec = 0;
916         uint16_t avail_idx = vq->last_avail_idx;
917         uint16_t max_tries, tries = 0;
918         uint16_t buf_id = 0;
919         uint32_t len = 0;
920         uint16_t desc_count;
921         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
922         uint16_t num_buffers = 0;
923         uint32_t buffer_len[vq->size];
924         uint16_t buffer_buf_id[vq->size];
925         uint16_t buffer_desc_count[vq->size];
926
927         if (rxvq_is_mergeable(dev))
928                 max_tries = vq->size - 1;
929         else
930                 max_tries = 1;
931
932         while (size > 0) {
933                 /*
934                  * If we have tried all available ring items and still
935                  * cannot get enough buffers, something abnormal has
936                  * happened.
937                  */
938                 if (unlikely(++tries > max_tries))
939                         return -1;
940
941                 if (unlikely(fill_vec_buf_packed(dev, vq,
942                                                 avail_idx, &desc_count,
943                                                 buf_vec, &nr_vec,
944                                                 &buf_id, &len,
945                                                 VHOST_ACCESS_RW) < 0))
946                         return -1;
947
948                 len = RTE_MIN(len, size);
949                 size -= len;
950
951                 buffer_len[num_buffers] = len;
952                 buffer_buf_id[num_buffers] = buf_id;
953                 buffer_desc_count[num_buffers] = desc_count;
954                 num_buffers += 1;
955
956                 *nr_descs += desc_count;
957                 avail_idx += desc_count;
958                 if (avail_idx >= vq->size)
959                         avail_idx -= vq->size;
960         }
961
962         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
963                 return -1;
964
965         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
966                                            buffer_desc_count, num_buffers);
967
968         return 0;
969 }
970
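/*
 * Split-ring enqueue: reserve buffers for each packet, copy the data, then
 * flush the shadow used ring and notify the guest.
 */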
971 static __rte_noinline uint32_t
972 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
973         struct rte_mbuf **pkts, uint32_t count)
974 {
975         uint32_t pkt_idx = 0;
976         uint16_t num_buffers;
977         struct buf_vector buf_vec[BUF_VECTOR_MAX];
978         uint16_t avail_head;
979
980         /*
981          * The ordering between avail index and
982          * desc reads needs to be enforced.
983          */
984         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
985
986         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
987
988         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
989                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
990                 uint16_t nr_vec = 0;
991
992                 if (unlikely(reserve_avail_buf_split(dev, vq,
993                                                 pkt_len, buf_vec, &num_buffers,
994                                                 avail_head, &nr_vec) < 0)) {
995                         VHOST_LOG_DATA(DEBUG,
996                                 "(%d) failed to get enough desc from vring\n",
997                                 dev->vid);
998                         vq->shadow_used_idx -= num_buffers;
999                         break;
1000                 }
1001
1002                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1003                         dev->vid, vq->last_avail_idx,
1004                         vq->last_avail_idx + num_buffers);
1005
1006                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1007                                                 buf_vec, nr_vec,
1008                                                 num_buffers) < 0) {
1009                         vq->shadow_used_idx -= num_buffers;
1010                         break;
1011                 }
1012
1013                 vq->last_avail_idx += num_buffers;
1014         }
1015
1016         do_data_copy_enqueue(dev, vq);
1017
1018         if (likely(vq->shadow_used_idx)) {
1019                 flush_shadow_used_ring_split(dev, vq);
1020                 vhost_vring_call_split(dev, vq);
1021         }
1022
1023         return pkt_idx;
1024 }
1025
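/*
 * Fast path enqueue of PACKED_BATCH_SIZE single-segment packets at once.
 * Returns -1 to fall back to the single-packet path when alignment,
 * availability, length or address-translation checks fail.
 */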
1026 static __rte_always_inline int
1027 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1028                            struct vhost_virtqueue *vq,
1029                            struct rte_mbuf **pkts)
1030 {
1031         bool wrap_counter = vq->avail_wrap_counter;
1032         struct vring_packed_desc *descs = vq->desc_packed;
1033         uint16_t avail_idx = vq->last_avail_idx;
1034         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1035         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1036         uint32_t buf_offset = dev->vhost_hlen;
1037         uint64_t lens[PACKED_BATCH_SIZE];
1038         uint16_t ids[PACKED_BATCH_SIZE];
1039         uint16_t i;
1040
1041         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1042                 return -1;
1043
1044         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1045                 return -1;
1046
1047         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1048                 if (unlikely(pkts[i]->next != NULL))
1049                         return -1;
1050                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1051                                             wrap_counter)))
1052                         return -1;
1053         }
1054
1055         rte_smp_rmb();
1056
1057         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1058                 lens[i] = descs[avail_idx + i].len;
1059
1060         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1061                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1062                         return -1;
1063         }
1064
1065         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1066                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1067                                                   descs[avail_idx + i].addr,
1068                                                   &lens[i],
1069                                                   VHOST_ACCESS_RW);
1070
1071         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1072                 if (unlikely(!desc_addrs[i]))
1073                         return -1;
1074                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1075                         return -1;
1076         }
1077
1078         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1079                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1080                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1081                                         (uintptr_t)desc_addrs[i];
1082                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1083         }
1084
1085         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1086                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1087
1088         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1089
1090         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1091                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1092                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1093                            pkts[i]->pkt_len);
1094         }
1095
1096         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1097                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1098                                            lens[i]);
1099
1100         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1101                 ids[i] = descs[avail_idx + i].id;
1102
1103         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1104
1105         return 0;
1106 }
1107
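/*
 * Slow path: enqueue one packet that may span several descriptors or
 * mergeable buffers.
 */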
1108 static __rte_always_inline int16_t
1109 virtio_dev_rx_single_packed(struct virtio_net *dev,
1110                             struct vhost_virtqueue *vq,
1111                             struct rte_mbuf *pkt)
1112 {
1113         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1114         uint16_t nr_descs = 0;
1115
1116         rte_smp_rmb();
1117         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1118                                                  &nr_descs) < 0)) {
1119                 VHOST_LOG_DATA(DEBUG,
1120                                 "(%d) failed to get enough desc from vring\n",
1121                                 dev->vid);
1122                 return -1;
1123         }
1124
1125         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1126                         dev->vid, vq->last_avail_idx,
1127                         vq->last_avail_idx + nr_descs);
1128
1129         vq_inc_last_avail_packed(vq, nr_descs);
1130
1131         return 0;
1132 }
1133
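/*
 * Packed-ring enqueue loop: try the batch fast path first, fall back to the
 * single-packet path, then flush shadow updates and kick the guest.
 */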
1134 static __rte_noinline uint32_t
1135 virtio_dev_rx_packed(struct virtio_net *dev,
1136                      struct vhost_virtqueue *vq,
1137                      struct rte_mbuf **pkts,
1138                      uint32_t count)
1139 {
1140         uint32_t pkt_idx = 0;
1141         uint32_t remained = count;
1142
1143         do {
1144                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1145
1146                 if (remained >= PACKED_BATCH_SIZE) {
1147                         if (!virtio_dev_rx_batch_packed(dev, vq,
1148                                                         &pkts[pkt_idx])) {
1149                                 pkt_idx += PACKED_BATCH_SIZE;
1150                                 remained -= PACKED_BATCH_SIZE;
1151                                 continue;
1152                         }
1153                 }
1154
1155                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1156                         break;
1157                 pkt_idx++;
1158                 remained--;
1159
1160         } while (pkt_idx < count);
1161
1162         if (vq->shadow_used_idx) {
1163                 do_data_copy_enqueue(dev, vq);
1164                 vhost_flush_enqueue_shadow_packed(dev, vq);
1165         }
1166
1167         if (pkt_idx)
1168                 vhost_vring_call_packed(dev, vq);
1169
1170         return pkt_idx;
1171 }
1172
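/*
 * Common enqueue entry point: validate the queue index, take the access and
 * IOTLB locks, translate the ring if needed, and dispatch to the packed or
 * split implementation.
 */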
1173 static __rte_always_inline uint32_t
1174 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1175         struct rte_mbuf **pkts, uint32_t count)
1176 {
1177         struct vhost_virtqueue *vq;
1178         uint32_t nb_tx = 0;
1179
1180         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1181         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1182                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1183                         dev->vid, __func__, queue_id);
1184                 return 0;
1185         }
1186
1187         vq = dev->virtqueue[queue_id];
1188
1189         rte_spinlock_lock(&vq->access_lock);
1190
1191         if (unlikely(vq->enabled == 0))
1192                 goto out_access_unlock;
1193
1194         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1195                 vhost_user_iotlb_rd_lock(vq);
1196
1197         if (unlikely(vq->access_ok == 0))
1198                 if (unlikely(vring_translate(dev, vq) < 0))
1199                         goto out;
1200
1201         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1202         if (count == 0)
1203                 goto out;
1204
1205         if (vq_is_packed(dev))
1206                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1207         else
1208                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1209
1210 out:
1211         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1212                 vhost_user_iotlb_rd_unlock(vq);
1213
1214 out_access_unlock:
1215         rte_spinlock_unlock(&vq->access_lock);
1216
1217         return nb_tx;
1218 }
1219
1220 uint16_t
1221 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1222         struct rte_mbuf **pkts, uint16_t count)
1223 {
1224         struct virtio_net *dev = get_device(vid);
1225
1226         if (!dev)
1227                 return 0;
1228
1229         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1230                 VHOST_LOG_DATA(ERR,
1231                         "(%d) %s: built-in vhost net backend is disabled.\n",
1232                         dev->vid, __func__);
1233                 return 0;
1234         }
1235
1236         return virtio_dev_rx(dev, queue_id, pkts, count);
1237 }
1238
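/* True when any host offload feature (csum, ECN, TSO, UFO) is negotiated. */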
1239 static inline bool
1240 virtio_net_with_host_offload(struct virtio_net *dev)
1241 {
1242         if (dev->features &
1243                         ((1ULL << VIRTIO_NET_F_CSUM) |
1244                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1245                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1246                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1247                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1248                 return true;
1249
1250         return false;
1251 }
1252
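/*
 * Parse the Ethernet (and optional VLAN) and IP headers of an mbuf to locate
 * the L4 header, setting l2_len/l3_len and the PKT_TX_IPV4/IPV6 flags.
 */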
1253 static void
1254 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1255 {
1256         struct rte_ipv4_hdr *ipv4_hdr;
1257         struct rte_ipv6_hdr *ipv6_hdr;
1258         void *l3_hdr = NULL;
1259         struct rte_ether_hdr *eth_hdr;
1260         uint16_t ethertype;
1261
1262         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1263
1264         m->l2_len = sizeof(struct rte_ether_hdr);
1265         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1266
1267         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1268                 struct rte_vlan_hdr *vlan_hdr =
1269                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1270
1271                 m->l2_len += sizeof(struct rte_vlan_hdr);
1272                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1273         }
1274
1275         l3_hdr = (char *)eth_hdr + m->l2_len;
1276
1277         switch (ethertype) {
1278         case RTE_ETHER_TYPE_IPV4:
1279                 ipv4_hdr = l3_hdr;
1280                 *l4_proto = ipv4_hdr->next_proto_id;
1281                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1282                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1283                 m->ol_flags |= PKT_TX_IPV4;
1284                 break;
1285         case RTE_ETHER_TYPE_IPV6:
1286                 ipv6_hdr = l3_hdr;
1287                 *l4_proto = ipv6_hdr->proto;
1288                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1289                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1290                 m->ol_flags |= PKT_TX_IPV6;
1291                 break;
1292         default:
1293                 m->l3_len = 0;
1294                 *l4_proto = 0;
1295                 *l4_hdr = NULL;
1296                 break;
1297         }
1298 }
1299
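/*
 * Convert the virtio-net header of a received packet into mbuf offload
 * flags (checksum and GSO information).
 */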
1300 static __rte_always_inline void
1301 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1302 {
1303         uint16_t l4_proto = 0;
1304         void *l4_hdr = NULL;
1305         struct rte_tcp_hdr *tcp_hdr = NULL;
1306
1307         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1308                 return;
1309
1310         parse_ethernet(m, &l4_proto, &l4_hdr);
1311         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1312                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1313                         switch (hdr->csum_offset) {
1314                         case (offsetof(struct rte_tcp_hdr, cksum)):
1315                                 if (l4_proto == IPPROTO_TCP)
1316                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1317                                 break;
1318                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1319                                 if (l4_proto == IPPROTO_UDP)
1320                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1321                                 break;
1322                         case (offsetof(struct rte_sctp_hdr, cksum)):
1323                                 if (l4_proto == IPPROTO_SCTP)
1324                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1325                                 break;
1326                         default:
1327                                 break;
1328                         }
1329                 }
1330         }
1331
1332         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1333                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1334                 case VIRTIO_NET_HDR_GSO_TCPV4:
1335                 case VIRTIO_NET_HDR_GSO_TCPV6:
1336                         tcp_hdr = l4_hdr;
1337                         m->ol_flags |= PKT_TX_TCP_SEG;
1338                         m->tso_segsz = hdr->gso_size;
1339                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1340                         break;
1341                 case VIRTIO_NET_HDR_GSO_UDP:
1342                         m->ol_flags |= PKT_TX_UDP_SEG;
1343                         m->tso_segsz = hdr->gso_size;
1344                         m->l4_len = sizeof(struct rte_udp_hdr);
1345                         break;
1346                 default:
1347                         VHOST_LOG_DATA(WARNING,
1348                                 "unsupported gso type %u.\n", hdr->gso_type);
1349                         break;
1350                 }
1351         }
1352 }
1353
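/*
 * Gather the virtio-net header from the guest buffers when it is split
 * across descriptor chunks.
 */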
1354 static __rte_noinline void
1355 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1356                 struct buf_vector *buf_vec)
1357 {
1358         uint64_t len;
1359         uint64_t remain = sizeof(struct virtio_net_hdr);
1360         uint64_t src;
1361         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1362
1363         while (remain) {
1364                 len = RTE_MIN(remain, buf_vec->buf_len);
1365                 src = buf_vec->buf_addr;
1366                 rte_memcpy((void *)(uintptr_t)dst,
1367                                 (void *)(uintptr_t)src, len);
1368
1369                 remain -= len;
1370                 dst += len;
1371                 buf_vec++;
1372         }
1373 }
1374
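/*
 * Copy the guest buffers described by buf_vec into an mbuf chain, fetching
 * the virtio-net header when host offloads are negotiated. In dequeue
 * zero-copy mode the mbuf may reference the guest buffer directly instead
 * of copying.
 */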
1375 static __rte_always_inline int
1376 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1377                   struct buf_vector *buf_vec, uint16_t nr_vec,
1378                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1379 {
1380         uint32_t buf_avail, buf_offset;
1381         uint64_t buf_addr, buf_iova, buf_len;
1382         uint32_t mbuf_avail, mbuf_offset;
1383         uint32_t cpy_len;
1384         struct rte_mbuf *cur = m, *prev = m;
1385         struct virtio_net_hdr tmp_hdr;
1386         struct virtio_net_hdr *hdr = NULL;
1387         /* A counter to avoid an endless loop on a malformed desc chain */
1388         uint16_t vec_idx = 0;
1389         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1390         int error = 0;
1391
1392         buf_addr = buf_vec[vec_idx].buf_addr;
1393         buf_iova = buf_vec[vec_idx].buf_iova;
1394         buf_len = buf_vec[vec_idx].buf_len;
1395
1396         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1397                 error = -1;
1398                 goto out;
1399         }
1400
1401         if (virtio_net_with_host_offload(dev)) {
1402                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1403                         /*
1404                          * No luck, the virtio-net header doesn't fit
1405                          * in a contiguous virtual area.
1406                          */
1407                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1408                         hdr = &tmp_hdr;
1409                 } else {
1410                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1411                 }
1412         }
1413
1414         /*
1415          * A virtio driver normally uses at least 2 desc buffers
1416          * for Tx: the first for storing the header, and others
1417          * for Tx: the first for storing the header, and the others
1418          */
1419         if (unlikely(buf_len < dev->vhost_hlen)) {
1420                 buf_offset = dev->vhost_hlen - buf_len;
1421                 vec_idx++;
1422                 buf_addr = buf_vec[vec_idx].buf_addr;
1423                 buf_iova = buf_vec[vec_idx].buf_iova;
1424                 buf_len = buf_vec[vec_idx].buf_len;
1425                 buf_avail  = buf_len - buf_offset;
1426         } else if (buf_len == dev->vhost_hlen) {
1427                 if (unlikely(++vec_idx >= nr_vec))
1428                         goto out;
1429                 buf_addr = buf_vec[vec_idx].buf_addr;
1430                 buf_iova = buf_vec[vec_idx].buf_iova;
1431                 buf_len = buf_vec[vec_idx].buf_len;
1432
1433                 buf_offset = 0;
1434                 buf_avail = buf_len;
1435         } else {
1436                 buf_offset = dev->vhost_hlen;
1437                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1438         }
1439
1440         PRINT_PACKET(dev,
1441                         (uintptr_t)(buf_addr + buf_offset),
1442                         (uint32_t)buf_avail, 0);
1443
1444         mbuf_offset = 0;
1445         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1446         while (1) {
1447                 uint64_t hpa;
1448
1449                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1450
1451                 /*
1452                  * A desc buf might span two host physical pages that are
1453                  * not contiguous. In that case (gpa_to_hpa returns 0), the
1454                  * data will be copied even though zero copy is enabled.
1455                  */
1456                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1457                                         buf_iova + buf_offset, cpy_len)))) {
1458                         cur->data_len = cpy_len;
1459                         cur->data_off = 0;
1460                         cur->buf_addr =
1461                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1462                         cur->buf_iova = hpa;
1463
1464                         /*
1465                          * In zero copy mode, one mbuf can only reference data
1466                          * for one desc buf, or part of one.
1467                          */
1468                         mbuf_avail = cpy_len;
1469                 } else {
1470                         if (likely(cpy_len > MAX_BATCH_LEN ||
1471                                    vq->batch_copy_nb_elems >= vq->size ||
1472                                    (hdr && cur == m))) {
1473                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1474                                                                    mbuf_offset),
1475                                            (void *)((uintptr_t)(buf_addr +
1476                                                            buf_offset)),
1477                                            cpy_len);
1478                         } else {
1479                                 batch_copy[vq->batch_copy_nb_elems].dst =
1480                                         rte_pktmbuf_mtod_offset(cur, void *,
1481                                                                 mbuf_offset);
1482                                 batch_copy[vq->batch_copy_nb_elems].src =
1483                                         (void *)((uintptr_t)(buf_addr +
1484                                                                 buf_offset));
1485                                 batch_copy[vq->batch_copy_nb_elems].len =
1486                                         cpy_len;
1487                                 vq->batch_copy_nb_elems++;
1488                         }
1489                 }
1490
1491                 mbuf_avail  -= cpy_len;
1492                 mbuf_offset += cpy_len;
1493                 buf_avail -= cpy_len;
1494                 buf_offset += cpy_len;
1495
1496                 /* This buf has been fully consumed, get the next one */
1497                 if (buf_avail == 0) {
1498                         if (++vec_idx >= nr_vec)
1499                                 break;
1500
1501                         buf_addr = buf_vec[vec_idx].buf_addr;
1502                         buf_iova = buf_vec[vec_idx].buf_iova;
1503                         buf_len = buf_vec[vec_idx].buf_len;
1504
1505                         buf_offset = 0;
1506                         buf_avail  = buf_len;
1507
1508                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1509                                         (uint32_t)buf_avail, 0);
1510                 }
1511
1512                 /*
1513                  * This mbuf is full, allocate a new one
1514                  * to hold more data.
1515                  */
1516                 if (mbuf_avail == 0) {
1517                         cur = rte_pktmbuf_alloc(mbuf_pool);
1518                         if (unlikely(cur == NULL)) {
1519                                 VHOST_LOG_DATA(ERR, "Failed to "
1520                                         "allocate memory for mbuf.\n");
1521                                 error = -1;
1522                                 goto out;
1523                         }
1524                         if (unlikely(dev->dequeue_zero_copy))
1525                                 rte_mbuf_refcnt_update(cur, 1);
1526
1527                         prev->next = cur;
1528                         prev->data_len = mbuf_offset;
1529                         m->nb_segs += 1;
1530                         m->pkt_len += mbuf_offset;
1531                         prev = cur;
1532
1533                         mbuf_offset = 0;
1534                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1535                 }
1536         }
1537
1538         prev->data_len = mbuf_offset;
1539         m->pkt_len    += mbuf_offset;
1540
1541         if (hdr)
1542                 vhost_dequeue_offload(hdr, m);
1543
1544 out:
1545
1546         return error;
1547 }
1548
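/*
 * Return a free zero-copy mbuf slot, scanning from last_zmbuf_idx to the
 * end of the array and wrapping around once; NULL if all slots are in use.
 */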
1549 static __rte_always_inline struct zcopy_mbuf *
1550 get_zmbuf(struct vhost_virtqueue *vq)
1551 {
1552         uint16_t i;
1553         uint16_t last;
1554         int tries = 0;
1555
1556         /* search [last_zmbuf_idx, zmbuf_size) */
1557         i = vq->last_zmbuf_idx;
1558         last = vq->zmbuf_size;
1559
1560 again:
1561         for (; i < last; i++) {
1562                 if (vq->zmbufs[i].in_use == 0) {
1563                         vq->last_zmbuf_idx = i + 1;
1564                         vq->zmbufs[i].in_use = 1;
1565                         return &vq->zmbufs[i];
1566                 }
1567         }
1568
1569         tries++;
1570         if (tries == 1) {
1571                 /* search [0, last_zmbuf_idx) */
1572                 i = 0;
1573                 last = vq->last_zmbuf_idx;
1574                 goto again;
1575         }
1576
1577         return NULL;
1578 }
1579
1580 static void
1581 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1582 {
1583         rte_free(opaque);
1584 }
1585
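/*
 * Allocate an external buffer large enough for 'size' bytes plus headroom
 * and attach it to 'pkt'. The shared info is stored in the pkt buffer when
 * it fits there, otherwise at the end of the newly allocated buffer.
 */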
1586 static int
1587 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1588 {
1589         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1590         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1591         uint16_t buf_len;
1592         rte_iova_t iova;
1593         void *buf;
1594
1595         /* Try to use the pkt buffer to store the shinfo, to reduce the
1596          * amount of memory required; otherwise store it in the new buffer.
1597          */
1598         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1599                 shinfo = rte_pktmbuf_mtod(pkt,
1600                                           struct rte_mbuf_ext_shared_info *);
1601         else {
1602                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1603                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1604         }
1605
1606         if (unlikely(total_len > UINT16_MAX))
1607                 return -ENOSPC;
1608
1609         buf_len = total_len;
1610         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1611         if (unlikely(buf == NULL))
1612                 return -ENOMEM;
1613
1614         /* Initialize shinfo */
1615         if (shinfo) {
1616                 shinfo->free_cb = virtio_dev_extbuf_free;
1617                 shinfo->fcb_opaque = buf;
1618                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1619         } else {
1620                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1621                                               virtio_dev_extbuf_free, buf);
1622                 if (unlikely(shinfo == NULL)) {
1623                         rte_free(buf);
1624                         VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
1625                         return -1;
1626                 }
1627         }
1628
1629         iova = rte_malloc_virt2iova(buf);
1630         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1631         rte_pktmbuf_reset_headroom(pkt);
1632
1633         return 0;
1634 }
1635
1636 /*
1637  * Allocate a host-supported pktmbuf for a packet of data_len bytes.
1638  */
1639 static __rte_always_inline struct rte_mbuf *
1640 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1641                          uint32_t data_len)
1642 {
1643         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1644
1645         if (unlikely(pkt == NULL)) {
1646                 VHOST_LOG_DATA(ERR,
1647                         "Failed to allocate memory for mbuf.\n");
1648                 return NULL;
1649         }
1650
1651         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1652                 return pkt;
1653
1654         /* attach an external buffer if supported */
1655         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1656                 return pkt;
1657
1658         /* check if chained buffers are allowed */
1659         if (!dev->linearbuf)
1660                 return pkt;
1661
1662         /* Data doesn't fit into the buffer and the host supports
1663          * only linear buffers.
1664          */
1665         rte_pktmbuf_free(pkt);
1666
1667         return NULL;
1668 }
1669
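/*
 * Dequeue up to 'count' packets from a split virtqueue. Consumed zero-copy
 * mbufs are reclaimed first when dequeue zero-copy is enabled; otherwise
 * each descriptor chain is copied into a freshly allocated mbuf and the
 * used ring is updated once the burst is complete.
 */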
1670 static __rte_noinline uint16_t
1671 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1672         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1673 {
1674         uint16_t i;
1675         uint16_t free_entries;
1676
1677         if (unlikely(dev->dequeue_zero_copy)) {
1678                 struct zcopy_mbuf *zmbuf, *next;
1679
1680                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1681                      zmbuf != NULL; zmbuf = next) {
1682                         next = TAILQ_NEXT(zmbuf, next);
1683
1684                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1685                                 update_shadow_used_ring_split(vq,
1686                                                 zmbuf->desc_idx, 0);
1687                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1688                                 restore_mbuf(zmbuf->mbuf);
1689                                 rte_pktmbuf_free(zmbuf->mbuf);
1690                                 put_zmbuf(zmbuf);
1691                                 vq->nr_zmbuf -= 1;
1692                         }
1693                 }
1694
1695                 if (likely(vq->shadow_used_idx)) {
1696                         flush_shadow_used_ring_split(dev, vq);
1697                         vhost_vring_call_split(dev, vq);
1698                 }
1699         }
1700
1701         /*
1702          * The ordering between avail index and
1703          * desc reads needs to be enforced.
1704          */
1705         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
1706                         vq->last_avail_idx;
1707         if (free_entries == 0)
1708                 return 0;
1709
1710         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1711
1712         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1713
1714         count = RTE_MIN(count, MAX_PKT_BURST);
1715         count = RTE_MIN(count, free_entries);
1716         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
1717                         dev->vid, count);
1718
1719         for (i = 0; i < count; i++) {
1720                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1721                 uint16_t head_idx;
1722                 uint32_t buf_len;
1723                 uint16_t nr_vec = 0;
1724                 int err;
1725
1726                 if (unlikely(fill_vec_buf_split(dev, vq,
1727                                                 vq->last_avail_idx + i,
1728                                                 &nr_vec, buf_vec,
1729                                                 &head_idx, &buf_len,
1730                                                 VHOST_ACCESS_RO) < 0))
1731                         break;
1732
1733                 if (likely(dev->dequeue_zero_copy == 0))
1734                         update_shadow_used_ring_split(vq, head_idx, 0);
1735
1736                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1737                 if (unlikely(pkts[i] == NULL))
1738                         break;
1739
1740                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1741                                 mbuf_pool);
1742                 if (unlikely(err)) {
1743                         rte_pktmbuf_free(pkts[i]);
1744                         break;
1745                 }
1746
1747                 if (unlikely(dev->dequeue_zero_copy)) {
1748                         struct zcopy_mbuf *zmbuf;
1749
1750                         zmbuf = get_zmbuf(vq);
1751                         if (!zmbuf) {
1752                                 rte_pktmbuf_free(pkts[i]);
1753                                 break;
1754                         }
1755                         zmbuf->mbuf = pkts[i];
1756                         zmbuf->desc_idx = head_idx;
1757
1758                         /*
1759                          * Pin the mbuf with an extra reference; we will check
1760                          * later whether it has been freed by the application
1761                          * (i.e. we are the last user). If so, we can then
1762                          * safely update the used ring.
1763                          */
1764                         rte_mbuf_refcnt_update(pkts[i], 1);
1765
1766                         vq->nr_zmbuf += 1;
1767                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1768                 }
1769         }
1770         vq->last_avail_idx += i;
1771
1772         if (likely(dev->dequeue_zero_copy == 0)) {
1773                 do_data_copy_dequeue(vq);
1774                 if (unlikely(i < count))
1775                         vq->shadow_used_idx = i;
1776                 if (likely(vq->shadow_used_idx)) {
1777                         flush_shadow_used_ring_split(dev, vq);
1778                         vhost_vring_call_split(dev, vq);
1779                 }
1780         }
1781
1782         return i;
1783 }
1784
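/*
 * Check that a full batch of single-buffer descriptors is available,
 * translate their addresses, allocate one mbuf per descriptor and apply
 * the virtio-net header offloads when negotiated. Returns 0 only if the
 * whole batch can be processed.
 */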
1785 static __rte_always_inline int
1786 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1787                                  struct vhost_virtqueue *vq,
1788                                  struct rte_mempool *mbuf_pool,
1789                                  struct rte_mbuf **pkts,
1790                                  uint16_t avail_idx,
1791                                  uintptr_t *desc_addrs,
1792                                  uint16_t *ids)
1793 {
1794         bool wrap = vq->avail_wrap_counter;
1795         struct vring_packed_desc *descs = vq->desc_packed;
1796         struct virtio_net_hdr *hdr;
1797         uint64_t lens[PACKED_BATCH_SIZE];
1798         uint64_t buf_lens[PACKED_BATCH_SIZE];
1799         uint32_t buf_offset = dev->vhost_hlen;
1800         uint16_t flags, i;
1801
1802         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1803                 return -1;
1804         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1805                 return -1;
1806
1807         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1808                 flags = descs[avail_idx + i].flags;
1809                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1810                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1811                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1812                         return -1;
1813         }
1814
1815         rte_smp_rmb();
1816
1817         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1818                 lens[i] = descs[avail_idx + i].len;
1819
1820         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1821                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1822                                                   descs[avail_idx + i].addr,
1823                                                   &lens[i], VHOST_ACCESS_RW);
1824         }
1825
1826         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1827                 if (unlikely(!desc_addrs[i]))
1828                         return -1;
1829                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1830                         return -1;
1831         }
1832
1833         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1834                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1835                 if (!pkts[i])
1836                         goto free_buf;
1837         }
1838
1839         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1840                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1841
1842         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1843                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1844                         goto free_buf;
1845         }
1846
1847         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1848                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1849                 pkts[i]->data_len = pkts[i]->pkt_len;
1850                 ids[i] = descs[avail_idx + i].id;
1851         }
1852
1853         if (virtio_net_with_host_offload(dev)) {
1854                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1855                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1856                         vhost_dequeue_offload(hdr, pkts[i]);
1857                 }
1858         }
1859
1860         return 0;
1861
1862 free_buf:
1863         for (i = 0; i < PACKED_BATCH_SIZE; i++)
1864                 rte_pktmbuf_free(pkts[i]);
1865
1866         return -1;
1867 }
1868
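/*
 * Dequeue PACKED_BATCH_SIZE packets at once: copy each descriptor payload
 * into its mbuf, record the used descriptors in the shadow ring and
 * advance the avail index.
 */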
1869 static __rte_always_inline int
1870 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1871                            struct vhost_virtqueue *vq,
1872                            struct rte_mempool *mbuf_pool,
1873                            struct rte_mbuf **pkts)
1874 {
1875         uint16_t avail_idx = vq->last_avail_idx;
1876         uint32_t buf_offset = dev->vhost_hlen;
1877         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1878         uint16_t ids[PACKED_BATCH_SIZE];
1879         uint16_t i;
1880
1881         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1882                                              avail_idx, desc_addrs, ids))
1883                 return -1;
1884
1885         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1886                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1887
1888         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1889                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1890                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1891                            pkts[i]->pkt_len);
1892
1893         if (virtio_net_is_inorder(dev))
1894                 vhost_shadow_dequeue_batch_packed_inorder(vq,
1895                         ids[PACKED_BATCH_SIZE - 1]);
1896         else
1897                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
1898
1899         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1900
1901         return 0;
1902 }
1903
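/*
 * Dequeue a single descriptor chain from a packed virtqueue into a newly
 * allocated mbuf, returning its buffer id and descriptor count.
 */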
1904 static __rte_always_inline int
1905 vhost_dequeue_single_packed(struct virtio_net *dev,
1906                             struct vhost_virtqueue *vq,
1907                             struct rte_mempool *mbuf_pool,
1908                             struct rte_mbuf **pkts,
1909                             uint16_t *buf_id,
1910                             uint16_t *desc_count)
1911 {
1912         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1913         uint32_t buf_len;
1914         uint16_t nr_vec = 0;
1915         int err;
1916
1917         if (unlikely(fill_vec_buf_packed(dev, vq,
1918                                          vq->last_avail_idx, desc_count,
1919                                          buf_vec, &nr_vec,
1920                                          buf_id, &buf_len,
1921                                          VHOST_ACCESS_RO) < 0))
1922                 return -1;
1923
1924         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1925         if (unlikely(*pkts == NULL)) {
1926                 VHOST_LOG_DATA(ERR,
1927                         "Failed to allocate memory for mbuf.\n");
1928                 return -1;
1929         }
1930
1931         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1932                                 mbuf_pool);
1933         if (unlikely(err)) {
1934                 rte_pktmbuf_free(*pkts);
1935                 return -1;
1936         }
1937
1938         return 0;
1939 }
1940
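/*
 * Dequeue one packet from a packed virtqueue and record the used
 * descriptors in the shadow ring (in-order or out-of-order variant).
 */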
1941 static __rte_always_inline int
1942 virtio_dev_tx_single_packed(struct virtio_net *dev,
1943                             struct vhost_virtqueue *vq,
1944                             struct rte_mempool *mbuf_pool,
1945                             struct rte_mbuf **pkts)
1946 {
1947
1948         uint16_t buf_id, desc_count;
1949
1950         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1951                                         &desc_count))
1952                 return -1;
1953
1954         if (virtio_net_is_inorder(dev))
1955                 vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
1956                                                            desc_count);
1957         else
1958                 vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
1959
1960         vq_inc_last_avail_packed(vq, desc_count);
1961
1962         return 0;
1963 }
1964
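/*
 * Zero-copy variant of the batched dequeue: reserve the batch, pin each
 * mbuf with an extra reference and track it with a zcopy_mbuf entry so
 * the used ring is only updated once the application releases the mbuf.
 */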
1965 static __rte_always_inline int
1966 virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
1967                                  struct vhost_virtqueue *vq,
1968                                  struct rte_mempool *mbuf_pool,
1969                                  struct rte_mbuf **pkts)
1970 {
1971         struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
1972         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1973         uint16_t ids[PACKED_BATCH_SIZE];
1974         uint16_t i;
1975
1976         uint16_t avail_idx = vq->last_avail_idx;
1977
1978         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1979                                              avail_idx, desc_addrs, ids))
1980                 return -1;
1981
1982         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1983                 zmbufs[i] = get_zmbuf(vq);
1984
1985         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1986                 if (!zmbufs[i])
1987                         goto free_pkt;
1988         }
1989
1990         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1991                 zmbufs[i]->mbuf = pkts[i];
1992                 zmbufs[i]->desc_idx = ids[i];
1993                 zmbufs[i]->desc_count = 1;
1994         }
1995
1996         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1997                 rte_mbuf_refcnt_update(pkts[i], 1);
1998
1999         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2000                 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
2001
2002         vq->nr_zmbuf += PACKED_BATCH_SIZE;
2003         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2004
2005         return 0;
2006
2007 free_pkt:
2008         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2009                 rte_pktmbuf_free(pkts[i]);
2010
2011         return -1;
2012 }
2013
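/*
 * Zero-copy variant of the single-packet dequeue: pin the mbuf and track
 * it with a zcopy_mbuf entry instead of updating the used ring right away.
 */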
2014 static __rte_always_inline int
2015 virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
2016                                   struct vhost_virtqueue *vq,
2017                                   struct rte_mempool *mbuf_pool,
2018                                   struct rte_mbuf **pkts)
2019 {
2020         uint16_t buf_id, desc_count;
2021         struct zcopy_mbuf *zmbuf;
2022
2023         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2024                                         &desc_count))
2025                 return -1;
2026
2027         zmbuf = get_zmbuf(vq);
2028         if (!zmbuf) {
2029                 rte_pktmbuf_free(*pkts);
2030                 return -1;
2031         }
2032         zmbuf->mbuf = *pkts;
2033         zmbuf->desc_idx = buf_id;
2034         zmbuf->desc_count = desc_count;
2035
2036         rte_mbuf_refcnt_update(*pkts, 1);
2037
2038         vq->nr_zmbuf += 1;
2039         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2040
2041         vq_inc_last_avail_packed(vq, desc_count);
2042         return 0;
2043 }
2044
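/*
 * Walk the zero-copy list and, for every mbuf the application has released,
 * write the corresponding used descriptor back to the packed ring and
 * recycle the zcopy_mbuf slot.
 */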
2045 static __rte_always_inline void
2046 free_zmbuf(struct vhost_virtqueue *vq)
2047 {
2048         struct zcopy_mbuf *next = NULL;
2049         struct zcopy_mbuf *zmbuf;
2050
2051         for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
2052              zmbuf != NULL; zmbuf = next) {
2053                 next = TAILQ_NEXT(zmbuf, next);
2054
2055                 uint16_t last_used_idx = vq->last_used_idx;
2056
2057                 if (mbuf_is_consumed(zmbuf->mbuf)) {
2058                         uint16_t flags;
2059                         flags = vq->desc_packed[last_used_idx].flags;
2060                         if (vq->used_wrap_counter) {
2061                                 flags |= VRING_DESC_F_USED;
2062                                 flags |= VRING_DESC_F_AVAIL;
2063                         } else {
2064                                 flags &= ~VRING_DESC_F_USED;
2065                                 flags &= ~VRING_DESC_F_AVAIL;
2066                         }
2067
2068                         vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
2069                         vq->desc_packed[last_used_idx].len = 0;
2070
2071                         rte_smp_wmb();
2072                         vq->desc_packed[last_used_idx].flags = flags;
2073
2074                         vq_inc_last_used_packed(vq, zmbuf->desc_count);
2075
2076                         TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2077                         restore_mbuf(zmbuf->mbuf);
2078                         rte_pktmbuf_free(zmbuf->mbuf);
2079                         put_zmbuf(zmbuf);
2080                         vq->nr_zmbuf -= 1;
2081                 }
2082         }
2083 }
2084
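/*
 * Dequeue loop for packed virtqueues in zero-copy mode: reclaim released
 * mbufs, then dequeue in batches when possible, falling back to
 * single-packet dequeue otherwise.
 */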
2085 static __rte_noinline uint16_t
2086 virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
2087                            struct vhost_virtqueue *vq,
2088                            struct rte_mempool *mbuf_pool,
2089                            struct rte_mbuf **pkts,
2090                            uint32_t count)
2091 {
2092         uint32_t pkt_idx = 0;
2093         uint32_t remained = count;
2094
2095         free_zmbuf(vq);
2096
2097         do {
2098                 if (remained >= PACKED_BATCH_SIZE) {
2099                         if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
2100                                 mbuf_pool, &pkts[pkt_idx])) {
2101                                 pkt_idx += PACKED_BATCH_SIZE;
2102                                 remained -= PACKED_BATCH_SIZE;
2103                                 continue;
2104                         }
2105                 }
2106
2107                 if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
2108                                                       &pkts[pkt_idx]))
2109                         break;
2110                 pkt_idx++;
2111                 remained--;
2112
2113         } while (remained);
2114
2115         if (pkt_idx)
2116                 vhost_vring_call_packed(dev, vq);
2117
2118         return pkt_idx;
2119 }
2120
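/*
 * Dequeue loop for packed virtqueues: try the batched path first, fall
 * back to single-packet dequeue, then flush the shadow used ring and
 * notify the guest when needed.
 */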
2121 static __rte_noinline uint16_t
2122 virtio_dev_tx_packed(struct virtio_net *dev,
2123                      struct vhost_virtqueue *vq,
2124                      struct rte_mempool *mbuf_pool,
2125                      struct rte_mbuf **pkts,
2126                      uint32_t count)
2127 {
2128         uint32_t pkt_idx = 0;
2129         uint32_t remained = count;
2130
2131         do {
2132                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2133
2134                 if (remained >= PACKED_BATCH_SIZE) {
2135                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2136                                                         &pkts[pkt_idx])) {
2137                                 pkt_idx += PACKED_BATCH_SIZE;
2138                                 remained -= PACKED_BATCH_SIZE;
2139                                 continue;
2140                         }
2141                 }
2142
2143                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2144                                                 &pkts[pkt_idx]))
2145                         break;
2146                 pkt_idx++;
2147                 remained--;
2148
2149         } while (remained);
2150
2151         if (vq->shadow_used_idx) {
2152                 do_data_copy_dequeue(vq);
2153
2154                 vhost_flush_dequeue_shadow_packed(dev, vq);
2155                 vhost_vring_call_packed(dev, vq);
2156         }
2157
2158         return pkt_idx;
2159 }
2160
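/*
 * Burst dequeue API: pull up to 'count' packets sent by the guest on
 * virtqueue 'queue_id' of device 'vid' into 'pkts', injecting a pending
 * RARP packet at the head of the array when one has been requested.
 */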
2161 uint16_t
2162 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2163         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2164 {
2165         struct virtio_net *dev;
2166         struct rte_mbuf *rarp_mbuf = NULL;
2167         struct vhost_virtqueue *vq;
2168         int16_t success = 1;
2169
2170         dev = get_device(vid);
2171         if (!dev)
2172                 return 0;
2173
2174         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2175                 VHOST_LOG_DATA(ERR,
2176                         "(%d) %s: built-in vhost net backend is disabled.\n",
2177                         dev->vid, __func__);
2178                 return 0;
2179         }
2180
2181         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2182                 VHOST_LOG_DATA(ERR,
2183                         "(%d) %s: invalid virtqueue idx %d.\n",
2184                         dev->vid, __func__, queue_id);
2185                 return 0;
2186         }
2187
2188         vq = dev->virtqueue[queue_id];
2189
2190         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2191                 return 0;
2192
2193         if (unlikely(vq->enabled == 0)) {
2194                 count = 0;
2195                 goto out_access_unlock;
2196         }
2197
2198         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2199                 vhost_user_iotlb_rd_lock(vq);
2200
2201         if (unlikely(vq->access_ok == 0))
2202                 if (unlikely(vring_translate(dev, vq) < 0)) {
2203                         count = 0;
2204                         goto out;
2205                 }
2206
2207         /*
2208          * Construct a RARP broadcast packet and inject it into the "pkts"
2209          * array, so that it looks like the guest actually sent such a packet.
2210          *
2211          * Check user_send_rarp() for more information.
2212          *
2213          * broadcast_rarp shares a cacheline in the virtio_net structure
2214          * with some fields that are accessed during enqueue, and
2215          * __atomic_compare_exchange_n performs a write when it carries
2216          * out the compare and exchange. This could result in false
2217          * sharing between enqueue and dequeue.
2218          *
2219          * Prevent unnecessary false sharing by reading broadcast_rarp first
2220          * and only performing compare and exchange if the read indicates it
2221          * is likely to be set.
2222          */
2223         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
2224                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
2225                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
2226
2227                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2228                 if (rarp_mbuf == NULL) {
2229                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
2230                         count = 0;
2231                         goto out;
2232                 }
2233                 count -= 1;
2234         }
2235
2236         if (vq_is_packed(dev)) {
2237                 if (unlikely(dev->dequeue_zero_copy))
2238                         count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
2239                                                            pkts, count);
2240                 else
2241                         count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
2242                                                      count);
2243         } else
2244                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2245
2246 out:
2247         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2248                 vhost_user_iotlb_rd_unlock(vq);
2249
2250 out_access_unlock:
2251         rte_spinlock_unlock(&vq->access_lock);
2252
2253         if (unlikely(rarp_mbuf != NULL)) {
2254                 /*
2255                  * Inject it at the head of the "pkts" array, so that the
2256                  * switch's MAC learning table gets updated first.
2257                  */
2258                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2259                 pkts[0] = rarp_mbuf;
2260                 count += 1;
2261         }
2262
2263         return count;
2264 }
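
/*
 * Illustrative sketch only, not part of this file: a minimal polling loop
 * an application might use with rte_vhost_dequeue_burst(). The device id
 * 'vid', the mempool 'mbuf_pool' and the handler 'forward_pkt()' are
 * assumed to be provided by the application; queue 1 is the first guest
 * Tx queue.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb;
 *
 *	nb = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts, 32);
 *	for (i = 0; i < nb; i++)
 *		forward_pkt(pkts[i]);
 */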