[dpdk.git] lib/vhost/virtio_net.c @ commit 3c8be48ca7ec9732769f9efe4e473129994b0009
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_net.h>
12 #include <rte_ether.h>
13 #include <rte_ip.h>
14 #include <rte_vhost.h>
15 #include <rte_tcp.h>
16 #include <rte_udp.h>
17 #include <rte_sctp.h>
18 #include <rte_arp.h>
19 #include <rte_spinlock.h>
20 #include <rte_malloc.h>
21 #include <rte_vhost_async.h>
22
23 #include "iotlb.h"
24 #include "vhost.h"
25
26 #define MAX_BATCH_LEN 256
27
28 static  __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static  __rte_always_inline bool
35 virtio_net_is_inorder(struct virtio_net *dev)
36 {
37         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
38 }
39
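/*
 * Virtqueue indices alternate between RX and TX: the enqueue path
 * (is_tx == 0) must use even indices, the dequeue path odd ones.
 * Also bound-check the index against the number of vrings.
 */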
40 static bool
41 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
42 {
43         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
44 }
45
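/*
 * Flush the copies batched in vq->batch_copy_elems for the enqueue path,
 * logging each written guest buffer for dirty-page tracking.
 */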
46 static inline void
47 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
48 {
49         struct batch_copy_elem *elem = vq->batch_copy_elems;
50         uint16_t count = vq->batch_copy_nb_elems;
51         int i;
52
53         for (i = 0; i < count; i++) {
54                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
55                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
56                                            elem[i].len);
57                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
58         }
59
60         vq->batch_copy_nb_elems = 0;
61 }
62
63 static inline void
64 do_data_copy_dequeue(struct vhost_virtqueue *vq)
65 {
66         struct batch_copy_elem *elem = vq->batch_copy_elems;
67         uint16_t count = vq->batch_copy_nb_elems;
68         int i;
69
70         for (i = 0; i < count; i++)
71                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
72
73         vq->batch_copy_nb_elems = 0;
74 }
75
76 static __rte_always_inline void
77 do_flush_shadow_used_ring_split(struct virtio_net *dev,
78                         struct vhost_virtqueue *vq,
79                         uint16_t to, uint16_t from, uint16_t size)
80 {
81         rte_memcpy(&vq->used->ring[to],
82                         &vq->shadow_used_split[from],
83                         size * sizeof(struct vring_used_elem));
84         vhost_log_cache_used_vring(dev, vq,
85                         offsetof(struct vring_used, ring[to]),
86                         size * sizeof(struct vring_used_elem));
87 }
88
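/*
 * Copy the shadow used entries into the used ring (in two chunks when the
 * ring wraps), then publish the new used index with a release store.
 */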
89 static __rte_always_inline void
90 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
91 {
92         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
93
94         if (used_idx + vq->shadow_used_idx <= vq->size) {
95                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
96                                           vq->shadow_used_idx);
97         } else {
98                 uint16_t size;
99
100                 /* update used ring interval [used_idx, vq->size] */
101                 size = vq->size - used_idx;
102                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
103
104                 /* update the remaining used ring interval [0, left_size] */
105                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
106                                           vq->shadow_used_idx - size);
107         }
108         vq->last_used_idx += vq->shadow_used_idx;
109
110         vhost_log_cache_sync(dev, vq);
111
112         __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
113                            __ATOMIC_RELEASE);
114         vq->shadow_used_idx = 0;
115         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
116                 sizeof(vq->used->idx));
117 }
118
119 static __rte_always_inline void
120 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
121                          uint16_t desc_idx, uint32_t len)
122 {
123         uint16_t i = vq->shadow_used_idx++;
124
125         vq->shadow_used_split[i].id  = desc_idx;
126         vq->shadow_used_split[i].len = len;
127 }
128
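/*
 * Write the shadow used entries back to the packed ring: ids and lens first,
 * flags only after a release fence, and the head descriptor's flags last so
 * the guest never observes a partially completed chain.
 */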
129 static __rte_always_inline void
130 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
131                                   struct vhost_virtqueue *vq)
132 {
133         int i;
134         uint16_t used_idx = vq->last_used_idx;
135         uint16_t head_idx = vq->last_used_idx;
136         uint16_t head_flags = 0;
137
138         /* Split loop in two to save memory barriers */
139         for (i = 0; i < vq->shadow_used_idx; i++) {
140                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
141                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
142
143                 used_idx += vq->shadow_used_packed[i].count;
144                 if (used_idx >= vq->size)
145                         used_idx -= vq->size;
146         }
147
148         /* The ordering for storing desc flags needs to be enforced. */
149         rte_atomic_thread_fence(__ATOMIC_RELEASE);
150
151         for (i = 0; i < vq->shadow_used_idx; i++) {
152                 uint16_t flags;
153
154                 if (vq->shadow_used_packed[i].len)
155                         flags = VRING_DESC_F_WRITE;
156                 else
157                         flags = 0;
158
159                 if (vq->used_wrap_counter) {
160                         flags |= VRING_DESC_F_USED;
161                         flags |= VRING_DESC_F_AVAIL;
162                 } else {
163                         flags &= ~VRING_DESC_F_USED;
164                         flags &= ~VRING_DESC_F_AVAIL;
165                 }
166
167                 if (i > 0) {
168                         vq->desc_packed[vq->last_used_idx].flags = flags;
169
170                         vhost_log_cache_used_vring(dev, vq,
171                                         vq->last_used_idx *
172                                         sizeof(struct vring_packed_desc),
173                                         sizeof(struct vring_packed_desc));
174                 } else {
175                         head_idx = vq->last_used_idx;
176                         head_flags = flags;
177                 }
178
179                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
180         }
181
182         vq->desc_packed[head_idx].flags = head_flags;
183
184         vhost_log_cache_used_vring(dev, vq,
185                                 head_idx *
186                                 sizeof(struct vring_packed_desc),
187                                 sizeof(struct vring_packed_desc));
188
189         vq->shadow_used_idx = 0;
190         vhost_log_cache_sync(dev, vq);
191 }
192
193 static __rte_always_inline void
194 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
195                                   struct vhost_virtqueue *vq)
196 {
197         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
198
199         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
200         /* desc flags are the synchronization point for the virtio packed vring */
201         __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
202                          used_elem->flags, __ATOMIC_RELEASE);
203
204         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
205                                    sizeof(struct vring_packed_desc),
206                                    sizeof(struct vring_packed_desc));
207         vq->shadow_used_idx = 0;
208         vhost_log_cache_sync(dev, vq);
209 }
210
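/*
 * Mark a whole batch of PACKED_BATCH_SIZE descriptors as used: ids and lens
 * first, then the flags after a release fence.
 */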
211 static __rte_always_inline void
212 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
213                                  struct vhost_virtqueue *vq,
214                                  uint64_t *lens,
215                                  uint16_t *ids)
216 {
217         uint16_t i;
218         uint16_t flags;
219         uint16_t last_used_idx;
220         struct vring_packed_desc *desc_base;
221
222         last_used_idx = vq->last_used_idx;
223         desc_base = &vq->desc_packed[last_used_idx];
224
225         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
226
227         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
228                 desc_base[i].id = ids[i];
229                 desc_base[i].len = lens[i];
230         }
231
232         rte_atomic_thread_fence(__ATOMIC_RELEASE);
233
234         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
235                 desc_base[i].flags = flags;
236         }
237
238         vhost_log_cache_used_vring(dev, vq, last_used_idx *
239                                    sizeof(struct vring_packed_desc),
240                                    sizeof(struct vring_packed_desc) *
241                                    PACKED_BATCH_SIZE);
242         vhost_log_cache_sync(dev, vq);
243
244         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
245 }
246
247 static __rte_always_inline void
248 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
249                                           uint16_t id)
250 {
251         vq->shadow_used_packed[0].id = id;
252
253         if (!vq->shadow_used_idx) {
254                 vq->shadow_last_used_idx = vq->last_used_idx;
255                 vq->shadow_used_packed[0].flags =
256                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
257                 vq->shadow_used_packed[0].len = 0;
258                 vq->shadow_used_packed[0].count = 1;
259                 vq->shadow_used_idx++;
260         }
261
262         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
263 }
264
265 static __rte_always_inline void
266 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
267                                   struct vhost_virtqueue *vq,
268                                   uint16_t *ids)
269 {
270         uint16_t flags;
271         uint16_t i;
272         uint16_t begin;
273
274         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
275
276         if (!vq->shadow_used_idx) {
277                 vq->shadow_last_used_idx = vq->last_used_idx;
278                 vq->shadow_used_packed[0].id  = ids[0];
279                 vq->shadow_used_packed[0].len = 0;
280                 vq->shadow_used_packed[0].count = 1;
281                 vq->shadow_used_packed[0].flags = flags;
282                 vq->shadow_used_idx++;
283                 begin = 1;
284         } else
285                 begin = 0;
286
287         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
288                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
289                 vq->desc_packed[vq->last_used_idx + i].len = 0;
290         }
291
292         rte_atomic_thread_fence(__ATOMIC_RELEASE);
293         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
294                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
295
296         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
297                                    sizeof(struct vring_packed_desc),
298                                    sizeof(struct vring_packed_desc) *
299                                    PACKED_BATCH_SIZE);
300         vhost_log_cache_sync(dev, vq);
301
302         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
303 }
304
305 static __rte_always_inline void
306 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
307                                    uint16_t buf_id,
308                                    uint16_t count)
309 {
310         uint16_t flags;
311
312         flags = vq->desc_packed[vq->last_used_idx].flags;
313         if (vq->used_wrap_counter) {
314                 flags |= VRING_DESC_F_USED;
315                 flags |= VRING_DESC_F_AVAIL;
316         } else {
317                 flags &= ~VRING_DESC_F_USED;
318                 flags &= ~VRING_DESC_F_AVAIL;
319         }
320
321         if (!vq->shadow_used_idx) {
322                 vq->shadow_last_used_idx = vq->last_used_idx;
323
324                 vq->shadow_used_packed[0].id  = buf_id;
325                 vq->shadow_used_packed[0].len = 0;
326                 vq->shadow_used_packed[0].flags = flags;
327                 vq->shadow_used_idx++;
328         } else {
329                 vq->desc_packed[vq->last_used_idx].id = buf_id;
330                 vq->desc_packed[vq->last_used_idx].len = 0;
331                 vq->desc_packed[vq->last_used_idx].flags = flags;
332         }
333
334         vq_inc_last_used_packed(vq, count);
335 }
336
337 static __rte_always_inline void
338 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
339                                            uint16_t buf_id,
340                                            uint16_t count)
341 {
342         uint16_t flags;
343
344         vq->shadow_used_packed[0].id = buf_id;
345
346         flags = vq->desc_packed[vq->last_used_idx].flags;
347         if (vq->used_wrap_counter) {
348                 flags |= VRING_DESC_F_USED;
349                 flags |= VRING_DESC_F_AVAIL;
350         } else {
351                 flags &= ~VRING_DESC_F_USED;
352                 flags &= ~VRING_DESC_F_AVAIL;
353         }
354
355         if (!vq->shadow_used_idx) {
356                 vq->shadow_last_used_idx = vq->last_used_idx;
357                 vq->shadow_used_packed[0].len = 0;
358                 vq->shadow_used_packed[0].flags = flags;
359                 vq->shadow_used_idx++;
360         }
361
362         vq_inc_last_used_packed(vq, count);
363 }
364
365 static __rte_always_inline void
366 vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
367                                    uint32_t *len,
368                                    uint16_t *id,
369                                    uint16_t *count,
370                                    uint16_t num_buffers)
371 {
372         uint16_t i;
373
374         for (i = 0; i < num_buffers; i++) {
375                 /* enqueue shadow flush action aligned with batch num */
376                 if (!vq->shadow_used_idx)
377                         vq->shadow_aligned_idx = vq->last_used_idx &
378                                 PACKED_BATCH_MASK;
379                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
380                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
381                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
382                 vq->shadow_aligned_idx += count[i];
383                 vq->shadow_used_idx++;
384         }
385 }
386
387 static __rte_always_inline void
388 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
389                                    struct vhost_virtqueue *vq,
390                                    uint32_t *len,
391                                    uint16_t *id,
392                                    uint16_t *count,
393                                    uint16_t num_buffers)
394 {
395         vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
396
397         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
398                 do_data_copy_enqueue(dev, vq);
399                 vhost_flush_enqueue_shadow_packed(dev, vq);
400         }
401 }
402
403 /* avoid the write operation when it is unnecessary, to lessen cache issues */
404 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
405         if ((var) != (val))                     \
406                 (var) = (val);                  \
407 } while (0)
408
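/*
 * Translate the mbuf Tx offload flags (L4 checksum, TSO/UFO) into the
 * virtio-net header consumed by the guest; the IPv4 header checksum is
 * computed here because virtio-net has no IP checksum offload.
 */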
409 static __rte_always_inline void
410 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
411 {
412         uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
413
414         if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
415                 csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
416
417         if (csum_l4) {
418                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
419                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
420
421                 switch (csum_l4) {
422                 case RTE_MBUF_F_TX_TCP_CKSUM:
423                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
424                                                 cksum));
425                         break;
426                 case RTE_MBUF_F_TX_UDP_CKSUM:
427                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
428                                                 dgram_cksum));
429                         break;
430                 case RTE_MBUF_F_TX_SCTP_CKSUM:
431                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
432                                                 cksum));
433                         break;
434                 }
435         } else {
436                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
437                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
438                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
439         }
440
441         /* IP cksum verification cannot be bypassed, so calculate it here */
442         if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
443                 struct rte_ipv4_hdr *ipv4_hdr;
444
445                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
446                                                    m_buf->l2_len);
447                 ipv4_hdr->hdr_checksum = 0;
448                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
449         }
450
451         if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
452                 if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
453                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
454                 else
455                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
456                 net_hdr->gso_size = m_buf->tso_segsz;
457                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
458                                         + m_buf->l4_len;
459         } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
460                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
461                 net_hdr->gso_size = m_buf->tso_segsz;
462                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
463                         m_buf->l4_len;
464         } else {
465                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
466                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
467                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
468         }
469 }
470
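/*
 * Map one descriptor's IOVA range into host virtual addresses, splitting it
 * across several buf_vec entries when the range is not contiguous in the
 * host address space.
 */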
471 static __rte_always_inline int
472 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
473                 struct buf_vector *buf_vec, uint16_t *vec_idx,
474                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
475 {
476         uint16_t vec_id = *vec_idx;
477
478         while (desc_len) {
479                 uint64_t desc_addr;
480                 uint64_t desc_chunck_len = desc_len;
481
482                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
483                         return -1;
484
485                 desc_addr = vhost_iova_to_vva(dev, vq,
486                                 desc_iova,
487                                 &desc_chunck_len,
488                                 perm);
489                 if (unlikely(!desc_addr))
490                         return -1;
491
492                 rte_prefetch0((void *)(uintptr_t)desc_addr);
493
494                 buf_vec[vec_id].buf_iova = desc_iova;
495                 buf_vec[vec_id].buf_addr = desc_addr;
496                 buf_vec[vec_id].buf_len  = desc_chunck_len;
497
498                 desc_len -= desc_chunck_len;
499                 desc_iova += desc_chunck_len;
500                 vec_id++;
501         }
502         *vec_idx = vec_id;
503
504         return 0;
505 }
506
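/*
 * Walk the split-ring descriptor chain referenced by the given avail ring
 * slot (following an indirect table if present) and map each descriptor
 * into buf_vec.
 */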
507 static __rte_always_inline int
508 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
509                          uint32_t avail_idx, uint16_t *vec_idx,
510                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
511                          uint32_t *desc_chain_len, uint8_t perm)
512 {
513         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
514         uint16_t vec_id = *vec_idx;
515         uint32_t len    = 0;
516         uint64_t dlen;
517         uint32_t nr_descs = vq->size;
518         uint32_t cnt    = 0;
519         struct vring_desc *descs = vq->desc;
520         struct vring_desc *idesc = NULL;
521
522         if (unlikely(idx >= vq->size))
523                 return -1;
524
525         *desc_chain_head = idx;
526
527         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
528                 dlen = vq->desc[idx].len;
529                 nr_descs = dlen / sizeof(struct vring_desc);
530                 if (unlikely(nr_descs > vq->size))
531                         return -1;
532
533                 descs = (struct vring_desc *)(uintptr_t)
534                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
535                                                 &dlen,
536                                                 VHOST_ACCESS_RO);
537                 if (unlikely(!descs))
538                         return -1;
539
540                 if (unlikely(dlen < vq->desc[idx].len)) {
541                         /*
542                          * The indirect desc table is not contiguous
543                          * in the process VA space, so we have to copy it.
544                          */
545                         idesc = vhost_alloc_copy_ind_table(dev, vq,
546                                         vq->desc[idx].addr, vq->desc[idx].len);
547                         if (unlikely(!idesc))
548                                 return -1;
549
550                         descs = idesc;
551                 }
552
553                 idx = 0;
554         }
555
556         while (1) {
557                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
558                         free_ind_table(idesc);
559                         return -1;
560                 }
561
562                 dlen = descs[idx].len;
563                 len += dlen;
564
565                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
566                                                 descs[idx].addr, dlen,
567                                                 perm))) {
568                         free_ind_table(idesc);
569                         return -1;
570                 }
571
572                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
573                         break;
574
575                 idx = descs[idx].next;
576         }
577
578         *desc_chain_len = len;
579         *vec_idx = vec_id;
580
581         if (unlikely(!!idesc))
582                 free_ind_table(idesc);
583
584         return 0;
585 }
586
587 /*
588  * Returns -1 on fail, 0 on success
589  */
590 static inline int
591 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
592                                 uint32_t size, struct buf_vector *buf_vec,
593                                 uint16_t *num_buffers, uint16_t avail_head,
594                                 uint16_t *nr_vec)
595 {
596         uint16_t cur_idx;
597         uint16_t vec_idx = 0;
598         uint16_t max_tries, tries = 0;
599
600         uint16_t head_idx = 0;
601         uint32_t len = 0;
602
603         *num_buffers = 0;
604         cur_idx  = vq->last_avail_idx;
605
606         if (rxvq_is_mergeable(dev))
607                 max_tries = vq->size - 1;
608         else
609                 max_tries = 1;
610
611         while (size > 0) {
612                 if (unlikely(cur_idx == avail_head))
613                         return -1;
614                 /*
615                  * if we tried all available ring items, and still
616                  * can't get enough buf, it means something abnormal
617                  * happened.
618                  */
619                 if (unlikely(++tries > max_tries))
620                         return -1;
621
622                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
623                                                 &vec_idx, buf_vec,
624                                                 &head_idx, &len,
625                                                 VHOST_ACCESS_RW) < 0))
626                         return -1;
627                 len = RTE_MIN(len, size);
628                 update_shadow_used_ring_split(vq, head_idx, len);
629                 size -= len;
630
631                 cur_idx++;
632                 *num_buffers += 1;
633         }
634
635         *nr_vec = vec_idx;
636
637         return 0;
638 }
639
640 static __rte_always_inline int
641 fill_vec_buf_packed_indirect(struct virtio_net *dev,
642                         struct vhost_virtqueue *vq,
643                         struct vring_packed_desc *desc, uint16_t *vec_idx,
644                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
645 {
646         uint16_t i;
647         uint32_t nr_descs;
648         uint16_t vec_id = *vec_idx;
649         uint64_t dlen;
650         struct vring_packed_desc *descs, *idescs = NULL;
651
652         dlen = desc->len;
653         descs = (struct vring_packed_desc *)(uintptr_t)
654                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
655         if (unlikely(!descs))
656                 return -1;
657
658         if (unlikely(dlen < desc->len)) {
659                 /*
660                  * The indirect desc table is not contiguous
661                  * in process VA space, we have to copy it.
662                  */
663                 idescs = vhost_alloc_copy_ind_table(dev,
664                                 vq, desc->addr, desc->len);
665                 if (unlikely(!idescs))
666                         return -1;
667
668                 descs = idescs;
669         }
670
671         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
672         if (unlikely(nr_descs >= vq->size)) {
673                 free_ind_table(idescs);
674                 return -1;
675         }
676
677         for (i = 0; i < nr_descs; i++) {
678                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
679                         free_ind_table(idescs);
680                         return -1;
681                 }
682
683                 dlen = descs[i].len;
684                 *len += dlen;
685                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
686                                                 descs[i].addr, dlen,
687                                                 perm)))
688                         return -1;
689         }
690         *vec_idx = vec_id;
691
692         if (unlikely(!!idescs))
693                 free_ind_table(idescs);
694
695         return 0;
696 }
697
698 static __rte_always_inline int
699 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
700                                 uint16_t avail_idx, uint16_t *desc_count,
701                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
702                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
703 {
704         bool wrap_counter = vq->avail_wrap_counter;
705         struct vring_packed_desc *descs = vq->desc_packed;
706         uint16_t vec_id = *vec_idx;
707         uint64_t dlen;
708
709         if (avail_idx < vq->last_avail_idx)
710                 wrap_counter ^= 1;
711
712         /*
713          * Perform a load-acquire barrier in desc_is_avail to
714          * enforce the ordering between desc flags and desc
715          * content.
716          */
717         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
718                 return -1;
719
720         *desc_count = 0;
721         *len = 0;
722
723         while (1) {
724                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
725                         return -1;
726
727                 if (unlikely(*desc_count >= vq->size))
728                         return -1;
729
730                 *desc_count += 1;
731                 *buf_id = descs[avail_idx].id;
732
733                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
734                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
735                                                         &descs[avail_idx],
736                                                         &vec_id, buf_vec,
737                                                         len, perm) < 0))
738                                 return -1;
739                 } else {
740                         dlen = descs[avail_idx].len;
741                         *len += dlen;
742
743                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
744                                                         descs[avail_idx].addr,
745                                                         dlen,
746                                                         perm)))
747                                 return -1;
748                 }
749
750                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
751                         break;
752
753                 if (++avail_idx >= vq->size) {
754                         avail_idx -= vq->size;
755                         wrap_counter ^= 1;
756                 }
757         }
758
759         *vec_idx = vec_id;
760
761         return 0;
762 }
763
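/*
 * Slow path for the virtio-net header: it does not fit in the first
 * descriptor buffer, so copy it piecewise across the following buffers.
 */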
764 static __rte_noinline void
765 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
766                 struct buf_vector *buf_vec,
767                 struct virtio_net_hdr_mrg_rxbuf *hdr)
768 {
769         uint64_t len;
770         uint64_t remain = dev->vhost_hlen;
771         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
772         uint64_t iova = buf_vec->buf_iova;
773
774         while (remain) {
775                 len = RTE_MIN(remain,
776                                 buf_vec->buf_len);
777                 dst = buf_vec->buf_addr;
778                 rte_memcpy((void *)(uintptr_t)dst,
779                                 (void *)(uintptr_t)src,
780                                 len);
781
782                 PRINT_PACKET(dev, (uintptr_t)dst,
783                                 (uint32_t)len, 0);
784                 vhost_log_cache_write_iova(dev, vq,
785                                 iova, len);
786
787                 remain -= len;
788                 iova += len;
789                 src += len;
790                 buf_vec++;
791         }
792 }
793
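/*
 * Copy one mbuf chain into the guest buffers described by buf_vec, writing
 * the virtio-net header first; small copies are deferred to the batch copy
 * array and flushed later by do_data_copy_enqueue().
 */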
794 static __rte_always_inline int
795 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
796                             struct rte_mbuf *m, struct buf_vector *buf_vec,
797                             uint16_t nr_vec, uint16_t num_buffers)
798 {
799         uint32_t vec_idx = 0;
800         uint32_t mbuf_offset, mbuf_avail;
801         uint32_t buf_offset, buf_avail;
802         uint64_t buf_addr, buf_iova, buf_len;
803         uint32_t cpy_len;
804         uint64_t hdr_addr;
805         struct rte_mbuf *hdr_mbuf;
806         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
807         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
808         int error = 0;
809
810         if (unlikely(m == NULL)) {
811                 error = -1;
812                 goto out;
813         }
814
815         buf_addr = buf_vec[vec_idx].buf_addr;
816         buf_iova = buf_vec[vec_idx].buf_iova;
817         buf_len = buf_vec[vec_idx].buf_len;
818
819         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
820                 error = -1;
821                 goto out;
822         }
823
824         hdr_mbuf = m;
825         hdr_addr = buf_addr;
826         if (unlikely(buf_len < dev->vhost_hlen)) {
827                 memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
828                 hdr = &tmp_hdr;
829         } else
830                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
831
832         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
833                 dev->vid, num_buffers);
834
835         if (unlikely(buf_len < dev->vhost_hlen)) {
836                 buf_offset = dev->vhost_hlen - buf_len;
837                 vec_idx++;
838                 buf_addr = buf_vec[vec_idx].buf_addr;
839                 buf_iova = buf_vec[vec_idx].buf_iova;
840                 buf_len = buf_vec[vec_idx].buf_len;
841                 buf_avail = buf_len - buf_offset;
842         } else {
843                 buf_offset = dev->vhost_hlen;
844                 buf_avail = buf_len - dev->vhost_hlen;
845         }
846
847         mbuf_avail  = rte_pktmbuf_data_len(m);
848         mbuf_offset = 0;
849         while (mbuf_avail != 0 || m->next != NULL) {
850                 /* done with current buf, get the next one */
851                 if (buf_avail == 0) {
852                         vec_idx++;
853                         if (unlikely(vec_idx >= nr_vec)) {
854                                 error = -1;
855                                 goto out;
856                         }
857
858                         buf_addr = buf_vec[vec_idx].buf_addr;
859                         buf_iova = buf_vec[vec_idx].buf_iova;
860                         buf_len = buf_vec[vec_idx].buf_len;
861
862                         buf_offset = 0;
863                         buf_avail  = buf_len;
864                 }
865
866                 /* done with current mbuf, get the next one */
867                 if (mbuf_avail == 0) {
868                         m = m->next;
869
870                         mbuf_offset = 0;
871                         mbuf_avail  = rte_pktmbuf_data_len(m);
872                 }
873
874                 if (hdr_addr) {
875                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
876                         if (rxvq_is_mergeable(dev))
877                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
878                                                 num_buffers);
879
880                         if (unlikely(hdr == &tmp_hdr)) {
881                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
882                         } else {
883                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
884                                                 dev->vhost_hlen, 0);
885                                 vhost_log_cache_write_iova(dev, vq,
886                                                 buf_vec[0].buf_iova,
887                                                 dev->vhost_hlen);
888                         }
889
890                         hdr_addr = 0;
891                 }
892
893                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
894
895                 if (likely(cpy_len > MAX_BATCH_LEN ||
896                                         vq->batch_copy_nb_elems >= vq->size)) {
897                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
898                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
899                                 cpy_len);
900                         vhost_log_cache_write_iova(dev, vq,
901                                                    buf_iova + buf_offset,
902                                                    cpy_len);
903                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
904                                 cpy_len, 0);
905                 } else {
906                         batch_copy[vq->batch_copy_nb_elems].dst =
907                                 (void *)((uintptr_t)(buf_addr + buf_offset));
908                         batch_copy[vq->batch_copy_nb_elems].src =
909                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
910                         batch_copy[vq->batch_copy_nb_elems].log_addr =
911                                 buf_iova + buf_offset;
912                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
913                         vq->batch_copy_nb_elems++;
914                 }
915
916                 mbuf_avail  -= cpy_len;
917                 mbuf_offset += cpy_len;
918                 buf_avail  -= cpy_len;
919                 buf_offset += cpy_len;
920         }
921
922 out:
923
924         return error;
925 }
926
927 static __rte_always_inline void
928 async_fill_vec(struct rte_vhost_iovec *v, void *src, void *dst, size_t len)
929 {
930         v->src_addr = src;
931         v->dst_addr = dst;
932         v->len = len;
933 }
934
935 static __rte_always_inline void
936 async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
937         struct rte_vhost_iovec *vec, unsigned long nr_seg)
938 {
939         it->offset = 0;
940         it->count = count;
941
942         if (count) {
943                 it->iov = vec;
944                 it->nr_segs = nr_seg;
945         } else {
946                 it->iov = 0;
947                 it->nr_segs = 0;
948         }
949 }
950
951 static __rte_always_inline void
952 async_fill_desc(struct rte_vhost_async_desc *desc, struct rte_vhost_iov_iter *iter)
953 {
954         desc->iter = iter;
955 }
956
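/*
 * Async counterpart of copy_mbuf_to_desc(): instead of copying data, build
 * source/destination iovec entries (guest buffers resolved to host physical
 * addresses) for the DMA copy engine.
 */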
957 static __rte_always_inline int
958 async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
959                         struct rte_mbuf *m, struct buf_vector *buf_vec,
960                         uint16_t nr_vec, uint16_t num_buffers,
961                         struct rte_vhost_iovec *iovec, struct rte_vhost_iov_iter *iter)
962 {
963         struct rte_mbuf *hdr_mbuf;
964         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
965         uint64_t buf_addr, buf_iova;
966         uint64_t hdr_addr;
967         uint64_t mapped_len;
968         uint32_t vec_idx = 0;
969         uint32_t mbuf_offset, mbuf_avail;
970         uint32_t buf_offset, buf_avail;
971         uint32_t cpy_len, buf_len;
972         int error = 0;
973
974         uint32_t tlen = 0;
975         int tvec_idx = 0;
976         void *hpa;
977
978         if (unlikely(m == NULL)) {
979                 error = -1;
980                 goto out;
981         }
982
983         buf_addr = buf_vec[vec_idx].buf_addr;
984         buf_iova = buf_vec[vec_idx].buf_iova;
985         buf_len = buf_vec[vec_idx].buf_len;
986
987         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
988                 error = -1;
989                 goto out;
990         }
991
992         hdr_mbuf = m;
993         hdr_addr = buf_addr;
994         if (unlikely(buf_len < dev->vhost_hlen)) {
995                 memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
996                 hdr = &tmp_hdr;
997         } else
998                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
999
1000         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
1001                 dev->vid, num_buffers);
1002
1003         if (unlikely(buf_len < dev->vhost_hlen)) {
1004                 buf_offset = dev->vhost_hlen - buf_len;
1005                 vec_idx++;
1006                 buf_addr = buf_vec[vec_idx].buf_addr;
1007                 buf_iova = buf_vec[vec_idx].buf_iova;
1008                 buf_len = buf_vec[vec_idx].buf_len;
1009                 buf_avail = buf_len - buf_offset;
1010         } else {
1011                 buf_offset = dev->vhost_hlen;
1012                 buf_avail = buf_len - dev->vhost_hlen;
1013         }
1014
1015         mbuf_avail  = rte_pktmbuf_data_len(m);
1016         mbuf_offset = 0;
1017
1018         while (mbuf_avail != 0 || m->next != NULL) {
1019                 /* done with current buf, get the next one */
1020                 if (buf_avail == 0) {
1021                         vec_idx++;
1022                         if (unlikely(vec_idx >= nr_vec)) {
1023                                 error = -1;
1024                                 goto out;
1025                         }
1026
1027                         buf_addr = buf_vec[vec_idx].buf_addr;
1028                         buf_iova = buf_vec[vec_idx].buf_iova;
1029                         buf_len = buf_vec[vec_idx].buf_len;
1030
1031                         buf_offset = 0;
1032                         buf_avail = buf_len;
1033                 }
1034
1035                 /* done with current mbuf, get the next one */
1036                 if (mbuf_avail == 0) {
1037                         m = m->next;
1038
1039                         mbuf_offset = 0;
1040                         mbuf_avail = rte_pktmbuf_data_len(m);
1041                 }
1042
1043                 if (hdr_addr) {
1044                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
1045                         if (rxvq_is_mergeable(dev))
1046                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
1047                                                 num_buffers);
1048
1049                         if (unlikely(hdr == &tmp_hdr)) {
1050                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
1051                         } else {
1052                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
1053                                                 dev->vhost_hlen, 0);
1054                                 vhost_log_cache_write_iova(dev, vq,
1055                                                 buf_vec[0].buf_iova,
1056                                                 dev->vhost_hlen);
1057                         }
1058
1059                         hdr_addr = 0;
1060                 }
1061
1062                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1063
1064                 while (unlikely(cpy_len)) {
1065                         hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
1066                                         buf_iova + buf_offset,
1067                                         cpy_len, &mapped_len);
1068                         if (unlikely(!hpa)) {
1069                                 VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n",
1070                                 dev->vid, __func__);
1071                                 error = -1;
1072                                 goto out;
1073                         }
1074
1075                         async_fill_vec(iovec + tvec_idx,
1076                                 (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
1077                                 mbuf_offset), hpa, (size_t)mapped_len);
1078
1079                         tlen += (uint32_t)mapped_len;
1080                         cpy_len -= (uint32_t)mapped_len;
1081                         mbuf_avail  -= (uint32_t)mapped_len;
1082                         mbuf_offset += (uint32_t)mapped_len;
1083                         buf_avail  -= (uint32_t)mapped_len;
1084                         buf_offset += (uint32_t)mapped_len;
1085                         tvec_idx++;
1086                 }
1087         }
1088
1089         async_fill_iter(iter, tlen, iovec, tvec_idx);
1090 out:
1091         return error;
1092 }
1093
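/*
 * Reserve enough packed-ring descriptors for a single mbuf, copy it into the
 * guest buffers and record the used entries in the shadow ring.
 */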
1094 static __rte_always_inline int
1095 vhost_enqueue_single_packed(struct virtio_net *dev,
1096                             struct vhost_virtqueue *vq,
1097                             struct rte_mbuf *pkt,
1098                             struct buf_vector *buf_vec,
1099                             uint16_t *nr_descs)
1100 {
1101         uint16_t nr_vec = 0;
1102         uint16_t avail_idx = vq->last_avail_idx;
1103         uint16_t max_tries, tries = 0;
1104         uint16_t buf_id = 0;
1105         uint32_t len = 0;
1106         uint16_t desc_count;
1107         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1108         uint16_t num_buffers = 0;
1109         uint32_t buffer_len[vq->size];
1110         uint16_t buffer_buf_id[vq->size];
1111         uint16_t buffer_desc_count[vq->size];
1112
1113         if (rxvq_is_mergeable(dev))
1114                 max_tries = vq->size - 1;
1115         else
1116                 max_tries = 1;
1117
1118         while (size > 0) {
1119                 /*
1120                  * if we tried all available ring items, and still
1121                  * can't get enough buf, it means something abnormal
1122                  * happened.
1123                  */
1124                 if (unlikely(++tries > max_tries))
1125                         return -1;
1126
1127                 if (unlikely(fill_vec_buf_packed(dev, vq,
1128                                                 avail_idx, &desc_count,
1129                                                 buf_vec, &nr_vec,
1130                                                 &buf_id, &len,
1131                                                 VHOST_ACCESS_RW) < 0))
1132                         return -1;
1133
1134                 len = RTE_MIN(len, size);
1135                 size -= len;
1136
1137                 buffer_len[num_buffers] = len;
1138                 buffer_buf_id[num_buffers] = buf_id;
1139                 buffer_desc_count[num_buffers] = desc_count;
1140                 num_buffers += 1;
1141
1142                 *nr_descs += desc_count;
1143                 avail_idx += desc_count;
1144                 if (avail_idx >= vq->size)
1145                         avail_idx -= vq->size;
1146         }
1147
1148         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
1149                 return -1;
1150
1151         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1152                                            buffer_desc_count, num_buffers);
1153
1154         return 0;
1155 }
1156
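/*
 * Enqueue a burst of mbufs into a split virtqueue and return the number of
 * packets actually enqueued.
 */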
1157 static __rte_noinline uint32_t
1158 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1159         struct rte_mbuf **pkts, uint32_t count)
1160 {
1161         uint32_t pkt_idx = 0;
1162         uint16_t num_buffers;
1163         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1164         uint16_t avail_head;
1165
1166         /*
1167          * The ordering between avail index and
1168          * desc reads needs to be enforced.
1169          */
1170         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1171
1172         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1173
1174         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1175                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1176                 uint16_t nr_vec = 0;
1177
1178                 if (unlikely(reserve_avail_buf_split(dev, vq,
1179                                                 pkt_len, buf_vec, &num_buffers,
1180                                                 avail_head, &nr_vec) < 0)) {
1181                         VHOST_LOG_DATA(DEBUG,
1182                                 "(%d) failed to get enough desc from vring\n",
1183                                 dev->vid);
1184                         vq->shadow_used_idx -= num_buffers;
1185                         break;
1186                 }
1187
1188                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1189                         dev->vid, vq->last_avail_idx,
1190                         vq->last_avail_idx + num_buffers);
1191
1192                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1193                                                 buf_vec, nr_vec,
1194                                                 num_buffers) < 0) {
1195                         vq->shadow_used_idx -= num_buffers;
1196                         break;
1197                 }
1198
1199                 vq->last_avail_idx += num_buffers;
1200         }
1201
1202         do_data_copy_enqueue(dev, vq);
1203
1204         if (likely(vq->shadow_used_idx)) {
1205                 flush_shadow_used_ring_split(dev, vq);
1206                 vhost_vring_call_split(dev, vq);
1207         }
1208
1209         return pkt_idx;
1210 }
1211
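/*
 * Check whether the next PACKED_BATCH_SIZE descriptors can each hold one
 * single-segment mbuf; on success return their translated addresses and
 * lengths.
 */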
1212 static __rte_always_inline int
1213 virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
1214                            struct vhost_virtqueue *vq,
1215                            struct rte_mbuf **pkts,
1216                            uint64_t *desc_addrs,
1217                            uint64_t *lens)
1218 {
1219         bool wrap_counter = vq->avail_wrap_counter;
1220         struct vring_packed_desc *descs = vq->desc_packed;
1221         uint16_t avail_idx = vq->last_avail_idx;
1222         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1223         uint16_t i;
1224
1225         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1226                 return -1;
1227
1228         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1229                 return -1;
1230
1231         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1232                 if (unlikely(pkts[i]->next != NULL))
1233                         return -1;
1234                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1235                                             wrap_counter)))
1236                         return -1;
1237         }
1238
1239         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1240                 lens[i] = descs[avail_idx + i].len;
1241
1242         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1243                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1244                         return -1;
1245         }
1246
1247         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1248                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1249                                                   descs[avail_idx + i].addr,
1250                                                   &lens[i],
1251                                                   VHOST_ACCESS_RW);
1252
1253         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1254                 if (unlikely(!desc_addrs[i]))
1255                         return -1;
1256                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1257                         return -1;
1258         }
1259
1260         return 0;
1261 }
1262
1263 static __rte_always_inline void
1264 virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
1265                            struct vhost_virtqueue *vq,
1266                            struct rte_mbuf **pkts,
1267                            uint64_t *desc_addrs,
1268                            uint64_t *lens)
1269 {
1270         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1271         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1272         struct vring_packed_desc *descs = vq->desc_packed;
1273         uint16_t avail_idx = vq->last_avail_idx;
1274         uint16_t ids[PACKED_BATCH_SIZE];
1275         uint16_t i;
1276
1277         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1278                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1279                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1280                                         (uintptr_t)desc_addrs[i];
1281                 lens[i] = pkts[i]->pkt_len +
1282                         sizeof(struct virtio_net_hdr_mrg_rxbuf);
1283         }
1284
1285         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1286                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1287
1288         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1289
1290         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1291                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1292                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1293                            pkts[i]->pkt_len);
1294         }
1295
1296         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1297                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1298                                            lens[i]);
1299
1300         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1301                 ids[i] = descs[avail_idx + i].id;
1302
1303         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1304 }
1305
1306 static __rte_always_inline int
1307 virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
1308                            struct vhost_virtqueue *vq,
1309                            struct rte_mbuf **pkts)
1310 {
1311         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1312         uint64_t lens[PACKED_BATCH_SIZE];
1313
1314         if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
1315                 return -1;
1316
1317         if (vq->shadow_used_idx) {
1318                 do_data_copy_enqueue(dev, vq);
1319                 vhost_flush_enqueue_shadow_packed(dev, vq);
1320         }
1321
1322         virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);
1323
1324         return 0;
1325 }
1326
1327 static __rte_always_inline int16_t
1328 virtio_dev_rx_single_packed(struct virtio_net *dev,
1329                             struct vhost_virtqueue *vq,
1330                             struct rte_mbuf *pkt)
1331 {
1332         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1333         uint16_t nr_descs = 0;
1334
1335         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1336                                                  &nr_descs) < 0)) {
1337                 VHOST_LOG_DATA(DEBUG,
1338                                 "(%d) failed to get enough desc from vring\n",
1339                                 dev->vid);
1340                 return -1;
1341         }
1342
1343         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1344                         dev->vid, vq->last_avail_idx,
1345                         vq->last_avail_idx + nr_descs);
1346
1347         vq_inc_last_avail_packed(vq, nr_descs);
1348
1349         return 0;
1350 }
1351
1352 static __rte_noinline uint32_t
1353 virtio_dev_rx_packed(struct virtio_net *dev,
1354                      struct vhost_virtqueue *__rte_restrict vq,
1355                      struct rte_mbuf **__rte_restrict pkts,
1356                      uint32_t count)
1357 {
1358         uint32_t pkt_idx = 0;
1359
1360         do {
1361                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1362
1363                 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
1364                         if (!virtio_dev_rx_sync_batch_packed(dev, vq,
1365                                                         &pkts[pkt_idx])) {
1366                                 pkt_idx += PACKED_BATCH_SIZE;
1367                                 continue;
1368                         }
1369                 }
1370
1371                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1372                         break;
1373                 pkt_idx++;
1374
1375         } while (pkt_idx < count);
1376
1377         if (vq->shadow_used_idx) {
1378                 do_data_copy_enqueue(dev, vq);
1379                 vhost_flush_enqueue_shadow_packed(dev, vq);
1380         }
1381
1382         if (pkt_idx)
1383                 vhost_vring_call_packed(dev, vq);
1384
1385         return pkt_idx;
1386 }
1387
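/*
 * Common enqueue entry point: validate the virtqueue, take the access lock
 * (plus the IOTLB read lock when VIRTIO_F_IOMMU_PLATFORM is negotiated) and
 * dispatch to the packed or split ring implementation.
 */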
1388 static __rte_always_inline uint32_t
1389 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1390         struct rte_mbuf **pkts, uint32_t count)
1391 {
1392         struct vhost_virtqueue *vq;
1393         uint32_t nb_tx = 0;
1394
1395         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1396         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1397                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1398                         dev->vid, __func__, queue_id);
1399                 return 0;
1400         }
1401
1402         vq = dev->virtqueue[queue_id];
1403
1404         rte_spinlock_lock(&vq->access_lock);
1405
1406         if (unlikely(!vq->enabled))
1407                 goto out_access_unlock;
1408
1409         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1410                 vhost_user_iotlb_rd_lock(vq);
1411
1412         if (unlikely(!vq->access_ok))
1413                 if (unlikely(vring_translate(dev, vq) < 0))
1414                         goto out;
1415
1416         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1417         if (count == 0)
1418                 goto out;
1419
1420         if (vq_is_packed(dev))
1421                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1422         else
1423                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1424
1425 out:
1426         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1427                 vhost_user_iotlb_rd_unlock(vq);
1428
1429 out_access_unlock:
1430         rte_spinlock_unlock(&vq->access_lock);
1431
1432         return nb_tx;
1433 }
1434
1435 uint16_t
1436 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1437         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1438 {
1439         struct virtio_net *dev = get_device(vid);
1440
1441         if (!dev)
1442                 return 0;
1443
1444         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1445                 VHOST_LOG_DATA(ERR,
1446                         "(%d) %s: built-in vhost net backend is disabled.\n",
1447                         dev->vid, __func__);
1448                 return 0;
1449         }
1450
1451         return virtio_dev_rx(dev, queue_id, pkts, count);
1452 }
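
/*
 * Illustrative usage sketch only (not part of this file): a forwarding
 * core handing a burst of mbufs to the guest RX path. port_id, vid and
 * VIRTIO_RXQ are placeholder names. rte_vhost_enqueue_burst() copies
 * packets synchronously, so every mbuf can be freed on return whether
 * or not it was enqueued.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, i;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
 *	(void)rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, nb_rx);
 *	for (i = 0; i < nb_rx; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */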
1453
1454 static __rte_always_inline uint16_t
1455 virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
1456         uint16_t vq_size, uint16_t n_inflight)
1457 {
1458         return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
1459                 (vq_size - n_inflight + pkts_idx) % vq_size;
1460 }
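
/*
 * Worked example of the index math above (values are illustrative):
 * with vq_size = 256, n_inflight = 10 and pkts_idx = 5, the oldest
 * in-flight slot is (256 - 10 + 5) % 256 = 251, i.e. the window of
 * in-flight packets wraps around the end of the ring. With
 * pkts_idx = 32 the first branch applies and the start index is
 * simply 32 - 10 = 22.
 */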
1461
1462 static __rte_always_inline void
1463 store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,
1464                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1465 {
1466         size_t elem_size = sizeof(struct vring_used_elem);
1467
1468         if (d_idx + count <= ring_size) {
1469                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1470         } else {
1471                 uint16_t size = ring_size - d_idx;
1472
1473                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1474                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1475         }
1476 }
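
/*
 * Wrap-around sketch for the copy above (numbers hypothetical): with
 * ring_size = 8, d_idx = 6 and count = 5, the first rte_memcpy() fills
 * destination slots 6..7 (size = 2) and the second fills slots 0..2
 * (count - size = 3). The source shadow ring is read linearly; only
 * the destination is treated as circular.
 */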
1477
1478 static __rte_always_inline void
1479 store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
1480                 struct vring_used_elem_packed *d_ring,
1481                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1482 {
1483         size_t elem_size = sizeof(struct vring_used_elem_packed);
1484
1485         if (d_idx + count <= ring_size) {
1486                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1487         } else {
1488                 uint16_t size = ring_size - d_idx;
1489
1490                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1491                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1492         }
1493 }
1494
1495 static __rte_noinline uint32_t
1496 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
1497         struct vhost_virtqueue *vq, uint16_t queue_id,
1498         struct rte_mbuf **pkts, uint32_t count)
1499 {
1500         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1501         uint32_t pkt_idx = 0, pkt_burst_idx = 0;
1502         uint16_t num_buffers;
1503         uint16_t avail_head;
1504
1505         struct vhost_async *async = vq->async;
1506         struct rte_vhost_iov_iter *iter = async->iov_iter;
1507         struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
1508         struct rte_vhost_iovec *iovec = async->iovec;
1509         struct async_inflight_info *pkts_info = async->pkts_info;
1510         uint32_t n_pkts = 0, pkt_err = 0;
1511         int32_t n_xfer;
1512         uint16_t iovec_idx = 0, it_idx = 0, slot_idx = 0;
1513
1514         /*
1515          * The ordering between avail index and desc reads needs to be enforced.
1516          */
1517         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1518
1519         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1520
1521         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1522                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1523                 uint16_t nr_vec = 0;
1524
1525                 if (unlikely(reserve_avail_buf_split(dev, vq,
1526                                                 pkt_len, buf_vec, &num_buffers,
1527                                                 avail_head, &nr_vec) < 0)) {
1528                         VHOST_LOG_DATA(DEBUG,
1529                                 "(%d) failed to get enough desc from vring\n",
1530                                 dev->vid);
1531                         vq->shadow_used_idx -= num_buffers;
1532                         break;
1533                 }
1534
1535                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1536                         dev->vid, vq->last_avail_idx,
1537                         vq->last_avail_idx + num_buffers);
1538
1539                 if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers,
1540                                 &iovec[iovec_idx], &iter[it_idx]) < 0) {
1541                         vq->shadow_used_idx -= num_buffers;
1542                         break;
1543                 }
1544
1545                 async_fill_desc(&tdes[pkt_burst_idx++], &iter[it_idx]);
1546
1547                 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
1548                 pkts_info[slot_idx].descs = num_buffers;
1549                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1550
1551                 iovec_idx += iter[it_idx].nr_segs;
1552                 it_idx++;
1553
1554                 vq->last_avail_idx += num_buffers;
1555
1556                 /*
1557                  * Condition to trigger the async device transfer: the number of
1558                  * unused async iov entries has dropped below the max vhost vector size.
1559                  */
1560                 if (unlikely(VHOST_MAX_ASYNC_VEC - iovec_idx < BUF_VECTOR_MAX)) {
1561                         n_xfer = async->ops.transfer_data(dev->vid,
1562                                         queue_id, tdes, 0, pkt_burst_idx);
1563                         if (likely(n_xfer >= 0)) {
1564                                 n_pkts = n_xfer;
1565                         } else {
1566                                 VHOST_LOG_DATA(ERR,
1567                                         "(%d) %s: failed to transfer data for queue id %d.\n",
1568                                         dev->vid, __func__, queue_id);
1569                                 n_pkts = 0;
1570                         }
1571
1572                         iovec_idx = 0;
1573                         it_idx = 0;
1574
1575                         if (unlikely(n_pkts < pkt_burst_idx)) {
1576                                 /*
1577                                  * Log the number of error packets here; the
1578                                  * actual error processing is done when the
1579                                  * application polls for completions.
1580                                  */
1581                                 pkt_err = pkt_burst_idx - n_pkts;
1582                                 pkt_idx++;
1583                                 pkt_burst_idx = 0;
1584                                 break;
1585                         }
1586
1587                         pkt_burst_idx = 0;
1588                 }
1589         }
1590
1591         if (pkt_burst_idx) {
1592                 n_xfer = async->ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
1593                 if (likely(n_xfer >= 0)) {
1594                         n_pkts = n_xfer;
1595                 } else {
1596                         VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
1597                                 dev->vid, __func__, queue_id);
1598                         n_pkts = 0;
1599                 }
1600
1601                 if (unlikely(n_pkts < pkt_burst_idx))
1602                         pkt_err = pkt_burst_idx - n_pkts;
1603         }
1604
1605         if (unlikely(pkt_err)) {
1606                 uint16_t num_descs = 0;
1607
1608                 /* update number of completed packets */
1609                 pkt_idx -= pkt_err;
1610
1611                 /* calculate the sum of descriptors to revert */
1612                 while (pkt_err-- > 0) {
1613                         num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1614                         slot_idx--;
1615                 }
1616
1617                 /* recover shadow used ring and available ring */
1618                 vq->shadow_used_idx -= num_descs;
1619                 vq->last_avail_idx -= num_descs;
1620         }
1621
1622         /* keep used descriptors */
1623         if (likely(vq->shadow_used_idx)) {
1624                 uint16_t to = async->desc_idx_split & (vq->size - 1);
1625
1626                 store_dma_desc_info_split(vq->shadow_used_split,
1627                                 async->descs_split, vq->size, 0, to,
1628                                 vq->shadow_used_idx);
1629
1630                 async->desc_idx_split += vq->shadow_used_idx;
1631                 async->pkts_idx += pkt_idx;
1632                 async->pkts_inflight_n += pkt_idx;
1633                 vq->shadow_used_idx = 0;
1634         }
1635
1636         return pkt_idx;
1637 }
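
/*
 * Rollback sketch for the error path above (hypothetical numbers): if 8
 * packets were queued for DMA but the engine accepted only 5, pkt_err is
 * 3. pkt_idx is rewound by 3, and the descriptors consumed by the last
 * 3 packets (pkts_info[slot].descs, walked backwards from slot_idx) are
 * subtracted from both shadow_used_idx and last_avail_idx, so the guest
 * never sees the failed buffers as used.
 */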
1638
1639 static __rte_always_inline void
1640 vhost_update_used_packed(struct vhost_virtqueue *vq,
1641                         struct vring_used_elem_packed *shadow_ring,
1642                         uint16_t count)
1643 {
1644         int i;
1645         uint16_t used_idx = vq->last_used_idx;
1646         uint16_t head_idx = vq->last_used_idx;
1647         uint16_t head_flags = 0;
1648
1649         if (count == 0)
1650                 return;
1651
1652         /* Split the loop in two to save memory barriers */
1653         for (i = 0; i < count; i++) {
1654                 vq->desc_packed[used_idx].id = shadow_ring[i].id;
1655                 vq->desc_packed[used_idx].len = shadow_ring[i].len;
1656
1657                 used_idx += shadow_ring[i].count;
1658                 if (used_idx >= vq->size)
1659                         used_idx -= vq->size;
1660         }
1661
1662         /* The ordering for storing desc flags needs to be enforced. */
1663         rte_atomic_thread_fence(__ATOMIC_RELEASE);
1664
1665         for (i = 0; i < count; i++) {
1666                 uint16_t flags;
1667
1668                 if (vq->shadow_used_packed[i].len)
1669                         flags = VRING_DESC_F_WRITE;
1670                 else
1671                         flags = 0;
1672
1673                 if (vq->used_wrap_counter) {
1674                         flags |= VRING_DESC_F_USED;
1675                         flags |= VRING_DESC_F_AVAIL;
1676                 } else {
1677                         flags &= ~VRING_DESC_F_USED;
1678                         flags &= ~VRING_DESC_F_AVAIL;
1679                 }
1680
1681                 if (i > 0) {
1682                         vq->desc_packed[vq->last_used_idx].flags = flags;
1683                 } else {
1684                         head_idx = vq->last_used_idx;
1685                         head_flags = flags;
1686                 }
1687
1688                 vq_inc_last_used_packed(vq, shadow_ring[i].count);
1689         }
1690
1691         vq->desc_packed[head_idx].flags = head_flags;
1692 }
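
/*
 * Note on the ordering above: the flags of the first (head) descriptor
 * are written last, after the release fence, because the guest treats
 * the whole chain as used the moment the head flags flip. Flipping the
 * head first could expose stale id/len fields in the other descriptors.
 */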
1693
1694 static __rte_always_inline int
1695 vhost_enqueue_async_packed(struct virtio_net *dev,
1696                             struct vhost_virtqueue *vq,
1697                             struct rte_mbuf *pkt,
1698                             struct buf_vector *buf_vec,
1699                             uint16_t *nr_descs,
1700                             uint16_t *nr_buffers,
1701                             struct rte_vhost_iovec *iovec,
1702                             struct rte_vhost_iov_iter *iter)
1703 {
1704         uint16_t nr_vec = 0;
1705         uint16_t avail_idx = vq->last_avail_idx;
1706         uint16_t max_tries, tries = 0;
1707         uint16_t buf_id = 0;
1708         uint32_t len = 0;
1709         uint16_t desc_count = 0;
1710         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1711         uint32_t buffer_len[vq->size];
1712         uint16_t buffer_buf_id[vq->size];
1713         uint16_t buffer_desc_count[vq->size];
1714
1715         if (rxvq_is_mergeable(dev))
1716                 max_tries = vq->size - 1;
1717         else
1718                 max_tries = 1;
1719
1720         while (size > 0) {
1721                 /*
1722                  * If we have tried all available ring items and still
1723                  * can't get enough buffers, something abnormal has
1724                  * happened.
1725                  */
1726                 if (unlikely(++tries > max_tries))
1727                         return -1;
1728
1729                 if (unlikely(fill_vec_buf_packed(dev, vq,
1730                                                 avail_idx, &desc_count,
1731                                                 buf_vec, &nr_vec,
1732                                                 &buf_id, &len,
1733                                                 VHOST_ACCESS_RW) < 0))
1734                         return -1;
1735
1736                 len = RTE_MIN(len, size);
1737                 size -= len;
1738
1739                 buffer_len[*nr_buffers] = len;
1740                 buffer_buf_id[*nr_buffers] = buf_id;
1741                 buffer_desc_count[*nr_buffers] = desc_count;
1742                 *nr_buffers += 1;
1743                 *nr_descs += desc_count;
1744                 avail_idx += desc_count;
1745                 if (avail_idx >= vq->size)
1746                         avail_idx -= vq->size;
1747         }
1748
1749         if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
1750                                         *nr_buffers, iovec, iter) < 0))
1751                 return -1;
1752
1753         vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
1754
1755         return 0;
1756 }
1757
1758 static __rte_always_inline int16_t
1759 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1760                             struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers,
1761                             struct rte_vhost_iovec *iovec, struct rte_vhost_iov_iter *iter)
1762 {
1763         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1764
1765         if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec, nr_descs, nr_buffers,
1766                                                  iovec, iter) < 0)) {
1767                 VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
1768                 return -1;
1769         }
1770
1771         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1772                         dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
1773
1774         return 0;
1775 }
1776
1777 static __rte_always_inline void
1778 dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
1779                         uint32_t nr_err, uint32_t *pkt_idx)
1780 {
1781         uint16_t descs_err = 0;
1782         uint16_t buffers_err = 0;
1783         struct async_inflight_info *pkts_info = vq->async->pkts_info;
1784
1785         *pkt_idx -= nr_err;
1786         /* calculate the sum of buffers and descs of DMA-error packets. */
1787         while (nr_err-- > 0) {
1788                 descs_err += pkts_info[slot_idx % vq->size].descs;
1789                 buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
1790                 slot_idx--;
1791         }
1792
1793         if (vq->last_avail_idx >= descs_err) {
1794                 vq->last_avail_idx -= descs_err;
1795         } else {
1796                 vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
1797                 vq->avail_wrap_counter ^= 1;
1798         }
1799
1800         vq->shadow_used_idx -= buffers_err;
1801 }
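
/*
 * Wrap sketch for the index rewind above (hypothetical numbers): with
 * vq->size = 256, last_avail_idx = 3 and descs_err = 10, the rewind
 * crosses the ring boundary, so last_avail_idx becomes
 * 3 + 256 - 10 = 249 and avail_wrap_counter is flipped back.
 */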
1802
1803 static __rte_noinline uint32_t
1804 virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
1805         struct vhost_virtqueue *vq, uint16_t queue_id,
1806         struct rte_mbuf **pkts, uint32_t count)
1807 {
1808         uint32_t pkt_idx = 0, pkt_burst_idx = 0;
1809         uint32_t remained = count;
1810         int32_t n_xfer;
1811         uint16_t num_buffers;
1812         uint16_t num_descs;
1813
1814         struct vhost_async *async = vq->async;
1815         struct rte_vhost_iov_iter *iter = async->iov_iter;
1816         struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
1817         struct rte_vhost_iovec *iovec = async->iovec;
1818         struct async_inflight_info *pkts_info = async->pkts_info;
1819         uint32_t n_pkts = 0, pkt_err = 0;
1820         uint16_t slot_idx = 0;
1821         uint16_t iovec_idx = 0, it_idx = 0;
1822
1823         do {
1824                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1825
1826                 num_buffers = 0;
1827                 num_descs = 0;
1828                 if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
1829                                                 &num_descs, &num_buffers,
1830                                                 &iovec[iovec_idx], &iter[it_idx]) < 0))
1831                         break;
1832
1833                 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
1834
1835                 async_fill_desc(&tdes[pkt_burst_idx++], &iter[it_idx]);
1836                 pkts_info[slot_idx].descs = num_descs;
1837                 pkts_info[slot_idx].nr_buffers = num_buffers;
1838                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1839                 iovec_idx += iter[it_idx].nr_segs;
1840                 it_idx++;
1841
1842                 pkt_idx++;
1843                 remained--;
1844                 vq_inc_last_avail_packed(vq, num_descs);
1845
1846                 /*
1847                  * Condition to trigger the async device transfer: the number of
1848                  * unused async iov entries has dropped below the max vhost vector size.
1849                  */
1850                 if (unlikely(VHOST_MAX_ASYNC_VEC - iovec_idx < BUF_VECTOR_MAX)) {
1851                         n_xfer = async->ops.transfer_data(dev->vid,
1852                                         queue_id, tdes, 0, pkt_burst_idx);
1853                         if (likely(n_xfer >= 0)) {
1854                                 n_pkts = n_xfer;
1855                         } else {
1856                                 VHOST_LOG_DATA(ERR,
1857                                         "(%d) %s: failed to transfer data for queue id %d.\n",
1858                                         dev->vid, __func__, queue_id);
1859                                 n_pkts = 0;
1860                         }
1861
1862                         iovec_idx = 0;
1863                         it_idx = 0;
1864
1865                         if (unlikely(n_pkts < pkt_burst_idx)) {
1866                                 /*
1867                                  * Log the number of error packets here; the
1868                                  * actual error processing is done when the
1869                                  * application polls for completions.
1870                                  */
1871                                 pkt_err = pkt_burst_idx - n_pkts;
1872                                 pkt_burst_idx = 0;
1873                                 break;
1874                         }
1875
1876                         pkt_burst_idx = 0;
1877                 }
1878         } while (pkt_idx < count);
1879
1880         if (pkt_burst_idx) {
1881                 n_xfer = async->ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
1882                 if (likely(n_xfer >= 0)) {
1883                         n_pkts = n_xfer;
1884                 } else {
1885                         VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
1886                                 dev->vid, __func__, queue_id);
1887                         n_pkts = 0;
1888                 }
1889
1890                 if (unlikely(n_pkts < pkt_burst_idx))
1891                         pkt_err = pkt_burst_idx - n_pkts;
1892         }
1893
1894         if (unlikely(pkt_err))
1895                 dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
1896
1897         if (likely(vq->shadow_used_idx)) {
1898                 /* keep used descriptors. */
1899                 store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
1900                                         vq->size, 0, async->buffer_idx_packed,
1901                                         vq->shadow_used_idx);
1902
1903                 async->buffer_idx_packed += vq->shadow_used_idx;
1904                 if (async->buffer_idx_packed >= vq->size)
1905                         async->buffer_idx_packed -= vq->size;
1906
1907                 async->pkts_idx += pkt_idx;
1908                 if (async->pkts_idx >= vq->size)
1909                         async->pkts_idx -= vq->size;
1910
1911                 vq->shadow_used_idx = 0;
1912                 async->pkts_inflight_n += pkt_idx;
1913         }
1914
1915         return pkt_idx;
1916 }
1917
1918 static __rte_always_inline void
1919 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
1920 {
1921         struct vhost_async *async = vq->async;
1922         uint16_t nr_left = n_descs;
1923         uint16_t nr_copy;
1924         uint16_t to, from;
1925
1926         do {
1927                 from = async->last_desc_idx_split & (vq->size - 1);
1928                 nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
1929                 to = vq->last_used_idx & (vq->size - 1);
1930
1931                 if (to + nr_copy <= vq->size) {
1932                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1933                                         nr_copy * sizeof(struct vring_used_elem));
1934                 } else {
1935                         uint16_t size = vq->size - to;
1936
1937                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1938                                         size * sizeof(struct vring_used_elem));
1939                         rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
1940                                         (nr_copy - size) * sizeof(struct vring_used_elem));
1941                 }
1942
1943                 async->last_desc_idx_split += nr_copy;
1944                 vq->last_used_idx += nr_copy;
1945                 nr_left -= nr_copy;
1946         } while (nr_left > 0);
1947 }
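
/*
 * Note on the wrap handling above: nr_copy is clipped at the end of the
 * async shadow array (the `from` side), while a wrap of the guest used
 * ring (the `to` side) is handled inside the iteration with a second
 * rte_memcpy(), so one pass of the loop may perform both copies.
 */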
1948
1949 static __rte_always_inline void
1950 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
1951                                 uint16_t n_buffers)
1952 {
1953         struct vhost_async *async = vq->async;
1954         uint16_t nr_left = n_buffers;
1955         uint16_t from, to;
1956
1957         do {
1958                 from = async->last_buffer_idx_packed;
1959                 to = (from + nr_left) % vq->size;
1960                 if (to > from) {
1961                         vhost_update_used_packed(vq, async->buffers_packed + from, to - from);
1962                         async->last_buffer_idx_packed += nr_left;
1963                         nr_left = 0;
1964                 } else {
1965                         vhost_update_used_packed(vq, async->buffers_packed + from,
1966                                 vq->size - from);
1967                         async->last_buffer_idx_packed = 0;
1968                         nr_left -= vq->size - from;
1969                 }
1970         } while (nr_left > 0);
1971 }
1972
1973 static __rte_always_inline uint16_t
1974 vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
1975                 struct rte_mbuf **pkts, uint16_t count)
1976 {
1977         struct vhost_virtqueue *vq;
1978         struct vhost_async *async;
1979         struct async_inflight_info *pkts_info;
1980         int32_t n_cpl;
1981         uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
1982         uint16_t start_idx, pkts_idx, vq_size;
1983         uint16_t from, i;
1984
1985         vq = dev->virtqueue[queue_id];
1986         async = vq->async;
1987         pkts_idx = async->pkts_idx % vq->size;
1988         pkts_info = async->pkts_info;
1989         vq_size = vq->size;
1990         start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
1991                 vq_size, async->pkts_inflight_n);
1992
1993         if (count > async->last_pkts_n) {
1994                 n_cpl = async->ops.check_completed_copies(dev->vid,
1995                         queue_id, 0, count - async->last_pkts_n);
1996                 if (likely(n_cpl >= 0)) {
1997                         n_pkts_cpl = n_cpl;
1998                 } else {
1999                         VHOST_LOG_DATA(ERR,
2000                                 "(%d) %s: failed to check completed copies for queue id %d.\n",
2001                                 dev->vid, __func__, queue_id);
2002                         n_pkts_cpl = 0;
2003                 }
2004         }
2005
2006         n_pkts_cpl += async->last_pkts_n;
2007         n_pkts_put = RTE_MIN(n_pkts_cpl, count);
2008         if (unlikely(n_pkts_put == 0)) {
2009                 async->last_pkts_n = n_pkts_cpl;
2010                 return 0;
2011         }
2012
2013         if (vq_is_packed(dev)) {
2014                 for (i = 0; i < n_pkts_put; i++) {
2015                         from = (start_idx + i) % vq_size;
2016                         n_buffers += pkts_info[from].nr_buffers;
2017                         pkts[i] = pkts_info[from].mbuf;
2018                 }
2019         } else {
2020                 for (i = 0; i < n_pkts_put; i++) {
2021                         from = (start_idx + i) & (vq_size - 1);
2022                         n_descs += pkts_info[from].descs;
2023                         pkts[i] = pkts_info[from].mbuf;
2024                 }
2025         }
2026         async->last_pkts_n = n_pkts_cpl - n_pkts_put;
2027         async->pkts_inflight_n -= n_pkts_put;
2028
2029         if (likely(vq->enabled && vq->access_ok)) {
2030                 if (vq_is_packed(dev)) {
2031                         write_back_completed_descs_packed(vq, n_buffers);
2032
2033                         vhost_vring_call_packed(dev, vq);
2034                 } else {
2035                         write_back_completed_descs_split(vq, n_descs);
2036
2037                         __atomic_add_fetch(&vq->used->idx, n_descs,
2038                                         __ATOMIC_RELEASE);
2039                         vhost_vring_call_split(dev, vq);
2040                 }
2041         } else {
2042                 if (vq_is_packed(dev)) {
2043                         async->last_buffer_idx_packed += n_buffers;
2044                         if (async->last_buffer_idx_packed >= vq->size)
2045                                 async->last_buffer_idx_packed -= vq->size;
2046                 } else {
2047                         async->last_desc_idx_split += n_descs;
2048                 }
2049         }
2050
2051         return n_pkts_put;
2052 }
2053
2054 uint16_t
2055 rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
2056                 struct rte_mbuf **pkts, uint16_t count)
2057 {
2058         struct virtio_net *dev = get_device(vid);
2059         struct vhost_virtqueue *vq;
2060         uint16_t n_pkts_cpl = 0;
2061
2062         if (unlikely(!dev))
2063                 return 0;
2064
2065         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2066         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2067                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2068                         dev->vid, __func__, queue_id);
2069                 return 0;
2070         }
2071
2072         vq = dev->virtqueue[queue_id];
2073
2074         if (unlikely(!vq->async)) {
2075                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
2076                         dev->vid, __func__, queue_id);
2077                 return 0;
2078         }
2079
2080         rte_spinlock_lock(&vq->access_lock);
2081
2082         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
2083
2084         rte_spinlock_unlock(&vq->access_lock);
2085
2086         return n_pkts_cpl;
2087 }
2088
2089 uint16_t
2090 rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
2091                 struct rte_mbuf **pkts, uint16_t count)
2092 {
2093         struct virtio_net *dev = get_device(vid);
2094         struct vhost_virtqueue *vq;
2095         uint16_t n_pkts_cpl = 0;
2096
2097         if (!dev)
2098                 return 0;
2099
2100         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2101         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2102                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2103                         dev->vid, __func__, queue_id);
2104                 return 0;
2105         }
2106
2107         vq = dev->virtqueue[queue_id];
2108
2109         if (unlikely(!vq->async)) {
2110                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
2111                         dev->vid, __func__, queue_id);
2112                 return 0;
2113         }
2114
2115         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
2116
2117         return n_pkts_cpl;
2118 }
2119
2120 static __rte_always_inline uint32_t
2121 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
2122         struct rte_mbuf **pkts, uint32_t count)
2123 {
2124         struct vhost_virtqueue *vq;
2125         uint32_t nb_tx = 0;
2126
2127         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2128         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2129                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2130                         dev->vid, __func__, queue_id);
2131                 return 0;
2132         }
2133
2134         vq = dev->virtqueue[queue_id];
2135
2136         rte_spinlock_lock(&vq->access_lock);
2137
2138         if (unlikely(!vq->enabled || !vq->async))
2139                 goto out_access_unlock;
2140
2141         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2142                 vhost_user_iotlb_rd_lock(vq);
2143
2144         if (unlikely(!vq->access_ok))
2145                 if (unlikely(vring_translate(dev, vq) < 0))
2146                         goto out;
2147
2148         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
2149         if (count == 0)
2150                 goto out;
2151
2152         if (vq_is_packed(dev))
2153                 nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
2154                                 pkts, count);
2155         else
2156                 nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
2157                                 pkts, count);
2158
2159 out:
2160         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2161                 vhost_user_iotlb_rd_unlock(vq);
2162
2163 out_access_unlock:
2164         rte_spinlock_unlock(&vq->access_lock);
2165
2166         return nb_tx;
2167 }
2168
2169 uint16_t
2170 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
2171                 struct rte_mbuf **pkts, uint16_t count)
2172 {
2173         struct virtio_net *dev = get_device(vid);
2174
2175         if (!dev)
2176                 return 0;
2177
2178         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2179                 VHOST_LOG_DATA(ERR,
2180                         "(%d) %s: built-in vhost net backend is disabled.\n",
2181                         dev->vid, __func__);
2182                 return 0;
2183         }
2184
2185         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
2186 }
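
/*
 * Illustrative async usage sketch only (application side, not part of
 * this file). vid, port_id and VIRTIO_RXQ are placeholders, and an
 * async channel is assumed to have been registered on the queue
 * beforehand (e.g. with rte_vhost_async_channel_register()).
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	struct rte_mbuf *done[MAX_PKT_BURST];
 *	uint16_t nb_rx, nb_done, i;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
 *	(void)rte_vhost_submit_enqueue_burst(vid, VIRTIO_RXQ, pkts, nb_rx);
 *
 *	(submission only starts the DMA copies; the mbufs must stay valid
 *	until they are reported as completed)
 *
 *	nb_done = rte_vhost_poll_enqueue_completed(vid, VIRTIO_RXQ,
 *					done, MAX_PKT_BURST);
 *	for (i = 0; i < nb_done; i++)
 *		rte_pktmbuf_free(done[i]);
 */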
2187
2188 static inline bool
2189 virtio_net_with_host_offload(struct virtio_net *dev)
2190 {
2191         if (dev->features &
2192                         ((1ULL << VIRTIO_NET_F_CSUM) |
2193                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
2194                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2195                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
2196                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
2197                 return true;
2198
2199         return false;
2200 }
2201
2202 static int
2203 parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
2204 {
2205         struct rte_ipv4_hdr *ipv4_hdr;
2206         struct rte_ipv6_hdr *ipv6_hdr;
2207         struct rte_ether_hdr *eth_hdr;
2208         uint16_t ethertype;
2209         uint16_t data_len = rte_pktmbuf_data_len(m);
2210
2211         if (data_len < sizeof(struct rte_ether_hdr))
2212                 return -EINVAL;
2213
2214         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
2215
2216         m->l2_len = sizeof(struct rte_ether_hdr);
2217         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
2218
2219         if (ethertype == RTE_ETHER_TYPE_VLAN) {
2220                 if (data_len < sizeof(struct rte_ether_hdr) +
2221                                 sizeof(struct rte_vlan_hdr))
2222                         goto error;
2223
2224                 struct rte_vlan_hdr *vlan_hdr =
2225                         (struct rte_vlan_hdr *)(eth_hdr + 1);
2226
2227                 m->l2_len += sizeof(struct rte_vlan_hdr);
2228                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
2229         }
2230
2231         switch (ethertype) {
2232         case RTE_ETHER_TYPE_IPV4:
2233                 if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
2234                         goto error;
2235                 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2236                                 m->l2_len);
2237                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
2238                 if (data_len < m->l2_len + m->l3_len)
2239                         goto error;
2240                 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
2241                 *l4_proto = ipv4_hdr->next_proto_id;
2242                 break;
2243         case RTE_ETHER_TYPE_IPV6:
2244                 if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
2245                         goto error;
2246                 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
2247                                 m->l2_len);
2248                 m->l3_len = sizeof(struct rte_ipv6_hdr);
2249                 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
2250                 *l4_proto = ipv6_hdr->proto;
2251                 break;
2252         default:
2253                 /* a valid L3 header is needed for further L4 parsing */
2254                 goto error;
2255         }
2256
2257         /* both CSUM and GSO need a valid L4 header */
2258         switch (*l4_proto) {
2259         case IPPROTO_TCP:
2260                 if (data_len < m->l2_len + m->l3_len +
2261                                 sizeof(struct rte_tcp_hdr))
2262                         goto error;
2263                 break;
2264         case IPPROTO_UDP:
2265                 if (data_len < m->l2_len + m->l3_len +
2266                                 sizeof(struct rte_udp_hdr))
2267                         goto error;
2268                 break;
2269         case IPPROTO_SCTP:
2270                 if (data_len < m->l2_len + m->l3_len +
2271                                 sizeof(struct rte_sctp_hdr))
2272                         goto error;
2273                 break;
2274         default:
2275                 goto error;
2276         }
2277
2278         return 0;
2279
2280 error:
2281         m->l2_len = 0;
2282         m->l3_len = 0;
2283         m->ol_flags = 0;
2284         return -EINVAL;
2285 }
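
/*
 * Worked example for the parsing above (illustrative): for a
 * VLAN-tagged IPv4/TCP frame, l2_len = 14 (Ethernet) + 4 (VLAN) = 18
 * bytes and l3_len = 20 bytes for an IPv4 header without options, so
 * the TCP header is expected at offset 38 and data_len must be at
 * least 38 + sizeof(struct rte_tcp_hdr) = 58 bytes for parsing to
 * succeed.
 */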
2286
2287 static __rte_always_inline void
2288 vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
2289 {
2290         uint8_t l4_proto = 0;
2291         struct rte_tcp_hdr *tcp_hdr = NULL;
2292         uint16_t tcp_len;
2293         uint16_t data_len = rte_pktmbuf_data_len(m);
2294
2295         if (parse_headers(m, &l4_proto) < 0)
2296                 return;
2297
2298         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2299                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
2300                         switch (hdr->csum_offset) {
2301                         case (offsetof(struct rte_tcp_hdr, cksum)):
2302                                 if (l4_proto != IPPROTO_TCP)
2303                                         goto error;
2304                                 m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
2305                                 break;
2306                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
2307                                 if (l4_proto != IPPROTO_UDP)
2308                                         goto error;
2309                                 m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
2310                                 break;
2311                         case (offsetof(struct rte_sctp_hdr, cksum)):
2312                                 if (l4_proto != IPPROTO_SCTP)
2313                                         goto error;
2314                                 m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
2315                                 break;
2316                         default:
2317                                 goto error;
2318                         }
2319                 } else {
2320                         goto error;
2321                 }
2322         }
2323
2324         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2325                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2326                 case VIRTIO_NET_HDR_GSO_TCPV4:
2327                 case VIRTIO_NET_HDR_GSO_TCPV6:
2328                         if (l4_proto != IPPROTO_TCP)
2329                                 goto error;
2330                         tcp_hdr = rte_pktmbuf_mtod_offset(m,
2331                                         struct rte_tcp_hdr *,
2332                                         m->l2_len + m->l3_len);
2333                         tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
2334                         if (data_len < m->l2_len + m->l3_len + tcp_len)
2335                                 goto error;
2336                         m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
2337                         m->tso_segsz = hdr->gso_size;
2338                         m->l4_len = tcp_len;
2339                         break;
2340                 case VIRTIO_NET_HDR_GSO_UDP:
2341                         if (l4_proto != IPPROTO_UDP)
2342                                 goto error;
2343                         m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
2344                         m->tso_segsz = hdr->gso_size;
2345                         m->l4_len = sizeof(struct rte_udp_hdr);
2346                         break;
2347                 default:
2348                         VHOST_LOG_DATA(WARNING,
2349                                 "unsupported gso type %u.\n", hdr->gso_type);
2350                         goto error;
2351                 }
2352         }
2353         return;
2354
2355 error:
2356         m->l2_len = 0;
2357         m->l3_len = 0;
2358         m->ol_flags = 0;
2359 }
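
/*
 * The csum_offset dispatch above relies on the three offsets being
 * distinct: offsetof(struct rte_tcp_hdr, cksum) is 16,
 * offsetof(struct rte_udp_hdr, dgram_cksum) is 6 and
 * offsetof(struct rte_sctp_hdr, cksum) is 8, so a single switch can
 * recover which L4 checksum the guest asked the host to complete.
 */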
2360
2361 static __rte_always_inline void
2362 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,
2363         bool legacy_ol_flags)
2364 {
2365         struct rte_net_hdr_lens hdr_lens;
2366         int l4_supported = 0;
2367         uint32_t ptype;
2368
2369         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
2370                 return;
2371
2372         if (legacy_ol_flags) {
2373                 vhost_dequeue_offload_legacy(hdr, m);
2374                 return;
2375         }
2376
2377         m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
2378
2379         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
2380         m->packet_type = ptype;
2381         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
2382             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
2383             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
2384                 l4_supported = 1;
2385
2386         /* According to Virtio 1.1 spec, the device only needs to look at
2387          * VIRTIO_NET_HDR_F_NEEDS_CSUM in the packet transmission path.
2388          * This differs from the incoming packet processing path, where the
2389          * driver could rely on VIRTIO_NET_HDR_F_DATA_VALID flag set by the
2390          * device.
2391          *
2392          * 5.1.6.2.1 Driver Requirements: Packet Transmission
2393          * The driver MUST NOT set the VIRTIO_NET_HDR_F_DATA_VALID and
2394          * VIRTIO_NET_HDR_F_RSC_INFO bits in flags.
2395          *
2396          * 5.1.6.2.2 Device Requirements: Packet Transmission
2397          * The device MUST ignore flag bits that it does not recognize.
2398          */
2399         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2400                 uint32_t hdrlen;
2401
2402                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
2403                 if (hdr->csum_start <= hdrlen && l4_supported != 0) {
2404                         m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
2405                 } else {
2406                         /* Unknown proto or tunnel: do a software cksum. We can assume
2407                          * the cksum field is in the first segment since the
2408                          * buffers we provided to the host are large enough.
2409                          * In case of SCTP, this will be wrong since it's a CRC
2410                          * but there's nothing we can do.
2411                          */
2412                         uint16_t csum = 0, off;
2413
2414                         if (rte_raw_cksum_mbuf(m, hdr->csum_start,
2415                                         rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0)
2416                                 return;
2417                         if (likely(csum != 0xffff))
2418                                 csum = ~csum;
2419                         off = hdr->csum_offset + hdr->csum_start;
2420                         if (rte_pktmbuf_data_len(m) >= off + 1)
2421                                 *rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
2422                 }
2423         }
2424
2425         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2426                 if (hdr->gso_size == 0)
2427                         return;
2428
2429                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2430                 case VIRTIO_NET_HDR_GSO_TCPV4:
2431                 case VIRTIO_NET_HDR_GSO_TCPV6:
2432                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
2433                                 break;
2434                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2435                         m->tso_segsz = hdr->gso_size;
2436                         break;
2437                 case VIRTIO_NET_HDR_GSO_UDP:
2438                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
2439                                 break;
2440                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2441                         m->tso_segsz = hdr->gso_size;
2442                         break;
2443                 default:
2444                         break;
2445                 }
2446         }
2447 }
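
/*
 * Sketch of the software checksum completion above (assumed values):
 * for an untunneled IPv4/TCP packet, csum_start = 14 + 20 = 34 and
 * csum_offset = 16, so rte_raw_cksum_mbuf() sums from byte 34 to the
 * end of the packet and the one's complement of that sum (kept as-is
 * when it equals 0xffff) is written at byte offset 34 + 16 = 50, which
 * is where the TCP checksum field lives.
 */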
2448
2449 static __rte_noinline void
2450 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
2451                 struct buf_vector *buf_vec)
2452 {
2453         uint64_t len;
2454         uint64_t remain = sizeof(struct virtio_net_hdr);
2455         uint64_t src;
2456         uint64_t dst = (uint64_t)(uintptr_t)hdr;
2457
2458         while (remain) {
2459                 len = RTE_MIN(remain, buf_vec->buf_len);
2460                 src = buf_vec->buf_addr;
2461                 rte_memcpy((void *)(uintptr_t)dst,
2462                                 (void *)(uintptr_t)src, len);
2463
2464                 remain -= len;
2465                 dst += len;
2466                 buf_vec++;
2467         }
2468 }
2469
2470 static __rte_always_inline int
2471 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
2472                   struct buf_vector *buf_vec, uint16_t nr_vec,
2473                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
2474                   bool legacy_ol_flags)
2475 {
2476         uint32_t buf_avail, buf_offset;
2477         uint64_t buf_addr, buf_len;
2478         uint32_t mbuf_avail, mbuf_offset;
2479         uint32_t cpy_len;
2480         struct rte_mbuf *cur = m, *prev = m;
2481         struct virtio_net_hdr tmp_hdr;
2482         struct virtio_net_hdr *hdr = NULL;
2483         /* A counter to avoid a dead loop in the desc chain */
2484         uint16_t vec_idx = 0;
2485         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
2486         int error = 0;
2487
2488         buf_addr = buf_vec[vec_idx].buf_addr;
2489         buf_len = buf_vec[vec_idx].buf_len;
2490
2491         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
2492                 error = -1;
2493                 goto out;
2494         }
2495
2496         if (virtio_net_with_host_offload(dev)) {
2497                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
2498                         /*
2499                          * No luck, the virtio-net header doesn't fit
2500                          * in a contiguous virtual area.
2501                          */
2502                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
2503                         hdr = &tmp_hdr;
2504                 } else {
2505                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
2506                 }
2507         }
2508
2509         /*
2510          * A virtio driver normally uses at least 2 desc buffers
2511          * for Tx: the first for storing the header, and the others
2512          * for storing the data.
2513          */
2514         if (unlikely(buf_len < dev->vhost_hlen)) {
2515                 buf_offset = dev->vhost_hlen - buf_len;
2516                 vec_idx++;
2517                 buf_addr = buf_vec[vec_idx].buf_addr;
2518                 buf_len = buf_vec[vec_idx].buf_len;
2519                 buf_avail  = buf_len - buf_offset;
2520         } else if (buf_len == dev->vhost_hlen) {
2521                 if (unlikely(++vec_idx >= nr_vec))
2522                         goto out;
2523                 buf_addr = buf_vec[vec_idx].buf_addr;
2524                 buf_len = buf_vec[vec_idx].buf_len;
2525
2526                 buf_offset = 0;
2527                 buf_avail = buf_len;
2528         } else {
2529                 buf_offset = dev->vhost_hlen;
2530                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
2531         }
2532
2533         PRINT_PACKET(dev,
2534                         (uintptr_t)(buf_addr + buf_offset),
2535                         (uint32_t)buf_avail, 0);
2536
2537         mbuf_offset = 0;
2538         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
2539         while (1) {
2540                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
2541
2542                 if (likely(cpy_len > MAX_BATCH_LEN ||
2543                                         vq->batch_copy_nb_elems >= vq->size ||
2544                                         (hdr && cur == m))) {
2545                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2546                                                 mbuf_offset),
2547                                         (void *)((uintptr_t)(buf_addr +
2548                                                         buf_offset)), cpy_len);
2549                 } else {
2550                         batch_copy[vq->batch_copy_nb_elems].dst =
2551                                 rte_pktmbuf_mtod_offset(cur, void *,
2552                                                 mbuf_offset);
2553                         batch_copy[vq->batch_copy_nb_elems].src =
2554                                 (void *)((uintptr_t)(buf_addr + buf_offset));
2555                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
2556                         vq->batch_copy_nb_elems++;
2557                 }
2558
2559                 mbuf_avail  -= cpy_len;
2560                 mbuf_offset += cpy_len;
2561                 buf_avail -= cpy_len;
2562                 buf_offset += cpy_len;
2563
2564                 /* This buf reaches its end; get the next one */
2565                 if (buf_avail == 0) {
2566                         if (++vec_idx >= nr_vec)
2567                                 break;
2568
2569                         buf_addr = buf_vec[vec_idx].buf_addr;
2570                         buf_len = buf_vec[vec_idx].buf_len;
2571
2572                         buf_offset = 0;
2573                         buf_avail  = buf_len;
2574
2575                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2576                                         (uint32_t)buf_avail, 0);
2577                 }
2578
2579                 /*
2580                  * This mbuf reaches its end; get a new one
2581                  * to hold more data.
2582                  */
2583                 if (mbuf_avail == 0) {
2584                         cur = rte_pktmbuf_alloc(mbuf_pool);
2585                         if (unlikely(cur == NULL)) {
2586                                 VHOST_LOG_DATA(ERR, "Failed to "
2587                                         "allocate memory for mbuf.\n");
2588                                 error = -1;
2589                                 goto out;
2590                         }
2591
2592                         prev->next = cur;
2593                         prev->data_len = mbuf_offset;
2594                         m->nb_segs += 1;
2595                         m->pkt_len += mbuf_offset;
2596                         prev = cur;
2597
2598                         mbuf_offset = 0;
2599                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2600                 }
2601         }
2602
2603         prev->data_len = mbuf_offset;
2604         m->pkt_len    += mbuf_offset;
2605
2606         if (hdr)
2607                 vhost_dequeue_offload(hdr, m, legacy_ol_flags);
2608
2609 out:
2610
2611         return error;
2612 }
2613
2614 static void
2615 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2616 {
2617         rte_free(opaque);
2618 }
2619
2620 static int
2621 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
2622 {
2623         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2624         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2625         uint16_t buf_len;
2626         rte_iova_t iova;
2627         void *buf;
2628
2629         total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2630         total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2631
2632         if (unlikely(total_len > UINT16_MAX))
2633                 return -ENOSPC;
2634
2635         buf_len = total_len;
2636         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2637         if (unlikely(buf == NULL))
2638                 return -ENOMEM;
2639
2640         /* Initialize shinfo */
2641         shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2642                                                 virtio_dev_extbuf_free, buf);
2643         if (unlikely(shinfo == NULL)) {
2644                 rte_free(buf);
2645                 VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
2646                 return -1;
2647         }
2648
2649         iova = rte_malloc_virt2iova(buf);
2650         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2651         rte_pktmbuf_reset_headroom(pkt);
2652
2653         return 0;
2654 }
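
/*
 * Layout of the external buffer allocated above, for a hypothetical
 * packet data size:
 *
 *	+----------------------+------------+------------------+
 *	| RTE_PKTMBUF_HEADROOM | data       | shinfo + align   |
 *	+----------------------+------------+------------------+
 *
 * total_len folds all three regions together and the allocation is
 * refused when it does not fit in the 16-bit buf_len the mbuf external
 * buffer API expects.
 */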
2655
2656 /*
2657  * Prepare a pktmbuf that can hold a packet of the given host data length.
2658  */
2659 static __rte_always_inline int
2660 virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
2661                          uint32_t data_len)
2662 {
2663         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2664                 return 0;
2665
2666         /* attach an external buffer if supported */
2667         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
2668                 return 0;
2669
2670         /* check if chained buffers are allowed */
2671         if (!dev->linearbuf)
2672                 return 0;
2673
2674         return -1;
2675 }
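
/*
 * The helper above accepts a pktmbuf in three cases: the existing
 * tailroom already fits data_len, an external buffer can be attached
 * (dev->extbuf), or chaining into multiple mbufs is allowed
 * (!dev->linearbuf). Only when none applies is the dequeue of that
 * packet refused.
 */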
2676
2677 __rte_always_inline
2678 static uint16_t
2679 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2680         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
2681         bool legacy_ol_flags)
2682 {
2683         uint16_t i;
2684         uint16_t free_entries;
2685         uint16_t dropped = 0;
2686         static bool allocerr_warned;
2687
2688         /*
2689          * The ordering between avail index and
2690          * desc reads needs to be enforced.
2691          */
2692         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2693                         vq->last_avail_idx;
2694         if (free_entries == 0)
2695                 return 0;
2696
2697         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2698
2699         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2700
2701         count = RTE_MIN(count, MAX_PKT_BURST);
2702         count = RTE_MIN(count, free_entries);
2703         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
2704                         dev->vid, count);
2705
2706         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2707                 return 0;
2708
2709         for (i = 0; i < count; i++) {
2710                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2711                 uint16_t head_idx;
2712                 uint32_t buf_len;
2713                 uint16_t nr_vec = 0;
2714                 int err;
2715
2716                 if (unlikely(fill_vec_buf_split(dev, vq,
2717                                                 vq->last_avail_idx + i,
2718                                                 &nr_vec, buf_vec,
2719                                                 &head_idx, &buf_len,
2720                                                 VHOST_ACCESS_RO) < 0))
2721                         break;
2722
2723                 update_shadow_used_ring_split(vq, head_idx, 0);
2724
2725                 err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len);
2726                 if (unlikely(err)) {
2727                         /*
2728                          * mbuf allocation fails for jumbo packets when external
2729                          * buffer allocation is not allowed and a linear buffer
2730                          * is required. Drop this packet.
2731                          */
2732                         if (!allocerr_warned) {
2733                                 VHOST_LOG_DATA(ERR,
2734                                         "Failed mbuf alloc of size %d from %s on %s.\n",
2735                                         buf_len, mbuf_pool->name, dev->ifname);
2736                                 allocerr_warned = true;
2737                         }
2738                         dropped += 1;
2739                         i++;
2740                         break;
2741                 }
2742
2743                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2744                                 mbuf_pool, legacy_ol_flags);
2745                 if (unlikely(err)) {
2746                         if (!allocerr_warned) {
2747                                 VHOST_LOG_DATA(ERR,
2748                                         "Failed to copy desc to mbuf on %s.\n",
2749                                         dev->ifname);
2750                                 allocerr_warned = true;
2751                         }
2752                         dropped += 1;
2753                         i++;
2754                         break;
2755                 }
2756         }
2757
2758         if (dropped)
2759                 rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
2760
2761         vq->last_avail_idx += i;
2762
2763         do_data_copy_dequeue(vq);
2764         if (unlikely(i < count))
2765                 vq->shadow_used_idx = i;
2766         if (likely(vq->shadow_used_idx)) {
2767                 flush_shadow_used_ring_split(dev, vq);
2768                 vhost_vring_call_split(dev, vq);
2769         }
2770
2771         return (i - dropped);
2772 }
2773
2774 __rte_noinline
2775 static uint16_t
2776 virtio_dev_tx_split_legacy(struct virtio_net *dev,
2777         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2778         struct rte_mbuf **pkts, uint16_t count)
2779 {
2780         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
2781 }
2782
2783 __rte_noinline
2784 static uint16_t
2785 virtio_dev_tx_split_compliant(struct virtio_net *dev,
2786         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2787         struct rte_mbuf **pkts, uint16_t count)
2788 {
2789         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
2790 }
2791
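/*
 * Validate and map a full batch of PACKED_BATCH_SIZE descriptors starting
 * at a batch-aligned avail_idx: every descriptor must be available, not yet
 * used and not flagged for single dequeue, its buffer must translate to a
 * host VA of the advertised length, and the payload (minus the virtio-net
 * header) must fit in the prepared mbuf. Returns -1 on any mismatch so the
 * caller falls back to the single-descriptor path.
 */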
2792 static __rte_always_inline int
2793 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2794                                  struct vhost_virtqueue *vq,
2795                                  struct rte_mbuf **pkts,
2796                                  uint16_t avail_idx,
2797                                  uintptr_t *desc_addrs,
2798                                  uint16_t *ids)
2799 {
2800         bool wrap = vq->avail_wrap_counter;
2801         struct vring_packed_desc *descs = vq->desc_packed;
2802         uint64_t lens[PACKED_BATCH_SIZE];
2803         uint64_t buf_lens[PACKED_BATCH_SIZE];
2804         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2805         uint16_t flags, i;
2806
2807         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2808                 return -1;
2809         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2810                 return -1;
2811
2812         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2813                 flags = descs[avail_idx + i].flags;
2814                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2815                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2816                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2817                         return -1;
2818         }
2819
2820         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2821
2822         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2823                 lens[i] = descs[avail_idx + i].len;
2824
2825         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2826                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2827                                                   descs[avail_idx + i].addr,
2828                                                   &lens[i], VHOST_ACCESS_RW);
2829         }
2830
2831         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2832                 if (unlikely(!desc_addrs[i]))
2833                         return -1;
2834                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2835                         return -1;
2836         }
2837
2838         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2839                 if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
2840                         goto err;
2841         }
2842
2843         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2844                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2845
2846         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2847                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2848                         goto err;
2849         }
2850
2851         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2852                 pkts[i]->pkt_len = lens[i] - buf_offset;
2853                 pkts[i]->data_len = pkts[i]->pkt_len;
2854                 ids[i] = descs[avail_idx + i].id;
2855         }
2856
2857         return 0;
2858
2859 err:
2860         return -1;
2861 }
2862
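/*
 * Fast path: dequeue PACKED_BATCH_SIZE descriptors at once. The payload
 * following the mergeable virtio-net header is copied into each mbuf,
 * offload hints are parsed from the header when host offloads were
 * negotiated, the used ids are recorded in the shadow ring (only the last
 * id is needed for in-order devices) and last_avail_idx is advanced by a
 * full batch.
 */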
2863 static __rte_always_inline int
2864 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2865                            struct vhost_virtqueue *vq,
2866                            struct rte_mbuf **pkts,
2867                            bool legacy_ol_flags)
2868 {
2869         uint16_t avail_idx = vq->last_avail_idx;
2870         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2871         struct virtio_net_hdr *hdr;
2872         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2873         uint16_t ids[PACKED_BATCH_SIZE];
2874         uint16_t i;
2875
2876         if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
2877                                              desc_addrs, ids))
2878                 return -1;
2879
2880         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2881                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2882
2883         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2884                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2885                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2886                            pkts[i]->pkt_len);
2887
2888         if (virtio_net_with_host_offload(dev)) {
2889                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2890                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2891                         vhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);
2892                 }
2893         }
2894
2895         if (virtio_net_is_inorder(dev))
2896                 vhost_shadow_dequeue_batch_packed_inorder(vq,
2897                         ids[PACKED_BATCH_SIZE - 1]);
2898         else
2899                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2900
2901         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2902
2903         return 0;
2904 }
2905
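/*
 * Slow path: dequeue a single descriptor chain. The chain is gathered into
 * a buffer vector, the destination mbuf is sized for the whole buffer and
 * the data is copied (small copies may be deferred to the batched copy
 * list). Allocation and copy failures are logged once and return -1.
 */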
2906 static __rte_always_inline int
2907 vhost_dequeue_single_packed(struct virtio_net *dev,
2908                             struct vhost_virtqueue *vq,
2909                             struct rte_mempool *mbuf_pool,
2910                             struct rte_mbuf *pkts,
2911                             uint16_t *buf_id,
2912                             uint16_t *desc_count,
2913                             bool legacy_ol_flags)
2914 {
2915         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2916         uint32_t buf_len;
2917         uint16_t nr_vec = 0;
2918         int err;
2919         static bool allocerr_warned;
2920
2921         if (unlikely(fill_vec_buf_packed(dev, vq,
2922                                          vq->last_avail_idx, desc_count,
2923                                          buf_vec, &nr_vec,
2924                                          buf_id, &buf_len,
2925                                          VHOST_ACCESS_RO) < 0))
2926                 return -1;
2927
2928         if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
2929                 if (!allocerr_warned) {
2930                         VHOST_LOG_DATA(ERR,
2931                                 "Failed mbuf alloc of size %d from %s on %s.\n",
2932                                 buf_len, mbuf_pool->name, dev->ifname);
2933                         allocerr_warned = true;
2934                 }
2935                 return -1;
2936         }
2937
2938         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
2939                                 mbuf_pool, legacy_ol_flags);
2940         if (unlikely(err)) {
2941                 if (!allocerr_warned) {
2942                         VHOST_LOG_DATA(ERR,
2943                                 "Failed to copy desc to mbuf on %s.\n",
2944                                 dev->ifname);
2945                         allocerr_warned = true;
2946                 }
2947                 return -1;
2948         }
2949
2950         return 0;
2951 }
2952
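/*
 * Wrapper around vhost_dequeue_single_packed() that also returns the
 * descriptors to the guest: whenever a chain was consumed (desc_count > 0)
 * it is added to the shadow used entries and last_avail_idx is advanced,
 * even if the copy itself failed, so a bad packet is dropped rather than
 * retried.
 */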
2953 static __rte_always_inline int
2954 virtio_dev_tx_single_packed(struct virtio_net *dev,
2955                             struct vhost_virtqueue *vq,
2956                             struct rte_mempool *mbuf_pool,
2957                             struct rte_mbuf *pkts,
2958                             bool legacy_ol_flags)
2959 {
2960
2961         uint16_t buf_id, desc_count = 0;
2962         int ret;
2963
2964         ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2965                                         &desc_count, legacy_ol_flags);
2966
2967         if (likely(desc_count > 0)) {
2968                 if (virtio_net_is_inorder(dev))
2969                         vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
2970                                                                    desc_count);
2971                 else
2972                         vhost_shadow_dequeue_single_packed(vq, buf_id,
2973                                         desc_count);
2974
2975                 vq_inc_last_avail_packed(vq, desc_count);
2976         }
2977
2978         return ret;
2979 }
2980
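/*
 * Packed ring dequeue: all mbufs are allocated up front, then each
 * iteration first tries a full batch and falls back to a single chain,
 * stopping at the first failure. Unused mbufs are freed, the deferred
 * batched copies are completed, and the shadow used ring is flushed with a
 * guest notification if anything was dequeued.
 */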
2981 __rte_always_inline
2982 static uint16_t
2983 virtio_dev_tx_packed(struct virtio_net *dev,
2984                      struct vhost_virtqueue *__rte_restrict vq,
2985                      struct rte_mempool *mbuf_pool,
2986                      struct rte_mbuf **__rte_restrict pkts,
2987                      uint32_t count,
2988                      bool legacy_ol_flags)
2989 {
2990         uint32_t pkt_idx = 0;
2991
2992         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2993                 return 0;
2994
2995         do {
2996                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2997
2998                 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
2999                         if (!virtio_dev_tx_batch_packed(dev, vq,
3000                                                         &pkts[pkt_idx],
3001                                                         legacy_ol_flags)) {
3002                                 pkt_idx += PACKED_BATCH_SIZE;
3003                                 continue;
3004                         }
3005                 }
3006
3007                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
3008                                                 pkts[pkt_idx],
3009                                                 legacy_ol_flags))
3010                         break;
3011                 pkt_idx++;
3012         } while (pkt_idx < count);
3013
3014         if (pkt_idx != count)
3015                 rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
3016
3017         if (vq->shadow_used_idx) {
3018                 do_data_copy_dequeue(vq);
3019
3020                 vhost_flush_dequeue_shadow_packed(dev, vq);
3021                 vhost_vring_call_packed(dev, vq);
3022         }
3023
3024         return pkt_idx;
3025 }
3026
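/*
 * Same specialization trick as the split wrappers above: one __rte_noinline
 * entry point per constant legacy_ol_flags value for the packed ring path.
 */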
3027 __rte_noinline
3028 static uint16_t
3029 virtio_dev_tx_packed_legacy(struct virtio_net *dev,
3030         struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3031         struct rte_mbuf **__rte_restrict pkts, uint32_t count)
3032 {
3033         return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
3034 }
3035
3036 __rte_noinline
3037 static uint16_t
3038 virtio_dev_tx_packed_compliant(struct virtio_net *dev,
3039         struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3040         struct rte_mbuf **__rte_restrict pkts, uint32_t count)
3041 {
3042         return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
3043 }
3044
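/*
 * A minimal usage sketch (illustrative only, not part of this file): an
 * application polling loop that drains the guest TX queue into mbufs and
 * forwards them on a physical port. "vid", "port_id" and "mbuf_pool" are
 * assumed to be owned and initialized by the application.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx, nb_tx;
 *
 *	nb_rx = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mbuf_pool,
 *					pkts, RTE_DIM(pkts));
 *	if (nb_rx) {
 *		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *		if (nb_tx < nb_rx)
 *			rte_pktmbuf_free_bulk(&pkts[nb_tx], nb_rx - nb_tx);
 *	}
 */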
3045 uint16_t
3046 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
3047         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
3048 {
3049         struct virtio_net *dev;
3050         struct rte_mbuf *rarp_mbuf = NULL;
3051         struct vhost_virtqueue *vq;
3052         int16_t success = 1;
3053
3054         dev = get_device(vid);
3055         if (!dev)
3056                 return 0;
3057
3058         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
3059                 VHOST_LOG_DATA(ERR,
3060                         "(%d) %s: built-in vhost net backend is disabled.\n",
3061                         dev->vid, __func__);
3062                 return 0;
3063         }
3064
3065         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
3066                 VHOST_LOG_DATA(ERR,
3067                         "(%d) %s: invalid virtqueue idx %d.\n",
3068                         dev->vid, __func__, queue_id);
3069                 return 0;
3070         }
3071
3072         vq = dev->virtqueue[queue_id];
3073
3074         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
3075                 return 0;
3076
3077         if (unlikely(!vq->enabled)) {
3078                 count = 0;
3079                 goto out_access_unlock;
3080         }
3081
3082         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3083                 vhost_user_iotlb_rd_lock(vq);
3084
3085         if (unlikely(!vq->access_ok))
3086                 if (unlikely(vring_translate(dev, vq) < 0)) {
3087                         count = 0;
3088                         goto out;
3089                 }
3090
3091         /*
3092          * Construct a RARP broadcast packet and inject it into the "pkts"
3093          * array, so it looks like the guest actually sent such a packet.
3094          *
3095          * Check user_send_rarp() for more information.
3096          *
3097          * broadcast_rarp shares a cacheline in the virtio_net structure
3098          * with some fields that are accessed during enqueue, and
3099          * __atomic_compare_exchange_n writes to that cacheline whenever the
3100          * compare and exchange is performed. This could result in false
3101          * sharing between enqueue and dequeue.
3102          *
3103          * Prevent unnecessary false sharing by reading broadcast_rarp first
3104          * and only performing compare and exchange if the read indicates it
3105          * is likely to be set.
3106          */
3107         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
3108                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
3109                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
3110
3111                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
3112                 if (rarp_mbuf == NULL) {
3113                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
3114                         count = 0;
3115                         goto out;
3116                 }
3117                 /*
3118                  * Inject it at the head of the "pkts" array, so that the
3119                  * switch's MAC learning table gets updated first.
3120                  */
3121                 pkts[0] = rarp_mbuf;
3122                 pkts++;
3123                 count -= 1;
3124         }
3125
3126         if (vq_is_packed(dev)) {
3127                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3128                         count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
3129                 else
3130                         count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
3131         } else {
3132                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3133                         count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
3134                 else
3135                         count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
3136         }
3137
3138 out:
3139         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3140                 vhost_user_iotlb_rd_unlock(vq);
3141
3142 out_access_unlock:
3143         rte_spinlock_unlock(&vq->access_lock);
3144
3145         if (unlikely(rarp_mbuf != NULL))
3146                 count += 1;
3147
3148         return count;
3149 }