lib/vhost/virtio_net.c (dpdk.git, commit a2044aae1bbe6df236cbefde8ccbe593549ed370)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_net.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_vhost_async.h>

#include "iotlb.h"
#include "vhost.h"

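/*
 * Copies no larger than MAX_BATCH_LEN bytes are deferred into the
 * per-virtqueue batch_copy_elems array (unless it is full) and flushed
 * in one pass by do_data_copy_enqueue()/do_data_copy_dequeue(); larger
 * copies are performed immediately.
 */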
#define MAX_BATCH_LEN 256

static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
        return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}

static __rte_always_inline bool
virtio_net_is_inorder(struct virtio_net *dev)
{
        return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
}

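/*
 * Virtqueue indexes alternate between the two directions: even indexes
 * are used for enqueue (guest RX) rings and odd indexes for dequeue
 * (guest TX) rings, hence the parity check against is_tx below.
 */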
static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
        return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}

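/*
 * Flush the deferred batched copies. The enqueue variant also logs the
 * written guest pages for live migration; the dequeue variant does not,
 * since dequeue only reads guest memory.
 */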
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        struct batch_copy_elem *elem = vq->batch_copy_elems;
        uint16_t count = vq->batch_copy_nb_elems;
        int i;

        for (i = 0; i < count; i++) {
                rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
                vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
                                           elem[i].len);
                PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
        }

        vq->batch_copy_nb_elems = 0;
}

static inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
        struct batch_copy_elem *elem = vq->batch_copy_elems;
        uint16_t count = vq->batch_copy_nb_elems;
        int i;

        for (i = 0; i < count; i++)
                rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

        vq->batch_copy_nb_elems = 0;
}

static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        uint16_t to, uint16_t from, uint16_t size)
{
        rte_memcpy(&vq->used->ring[to],
                        &vq->shadow_used_split[from],
                        size * sizeof(struct vring_used_elem));
        vhost_log_cache_used_vring(dev, vq,
                        offsetof(struct vring_used, ring[to]),
                        size * sizeof(struct vring_used_elem));
}

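/*
 * Flush the shadow used ring to the real used ring, splitting the copy
 * in two when it would wrap past the end of the ring. The used->idx
 * update is a release store so the guest observes the new ring entries
 * before the index.
 */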
static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

        if (used_idx + vq->shadow_used_idx <= vq->size) {
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
                                          vq->shadow_used_idx);
        } else {
                uint16_t size;

                /* update the used ring interval [used_idx, vq->size) */
                size = vq->size - used_idx;
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

                /* update the remaining interval [0, shadow_used_idx - size) */
                do_flush_shadow_used_ring_split(dev, vq, 0, size,
                                          vq->shadow_used_idx - size);
        }
        vq->last_used_idx += vq->shadow_used_idx;

        vhost_log_cache_sync(dev, vq);

        __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
                           __ATOMIC_RELEASE);
        vq->shadow_used_idx = 0;
        vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
                sizeof(vq->used->idx));
}

static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
                         uint16_t desc_idx, uint32_t len)
{
        uint16_t i = vq->shadow_used_idx++;

        vq->shadow_used_split[i].id  = desc_idx;
        vq->shadow_used_split[i].len = len;
}

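/*
 * For packed rings, a descriptor chain is returned to the guest by
 * flipping the AVAIL/USED flag bits according to the current wrap
 * counter. The head descriptor's flags are written last, after a
 * release fence, so the guest cannot observe a partially updated batch.
 */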
static __rte_always_inline void
vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq)
{
        int i;
        uint16_t used_idx = vq->last_used_idx;
        uint16_t head_idx = vq->last_used_idx;
        uint16_t head_flags = 0;

        /* Split loop in two to save memory barriers */
        for (i = 0; i < vq->shadow_used_idx; i++) {
                vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
                vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

                used_idx += vq->shadow_used_packed[i].count;
                if (used_idx >= vq->size)
                        used_idx -= vq->size;
        }

        /* The ordering for storing desc flags needs to be enforced. */
        rte_atomic_thread_fence(__ATOMIC_RELEASE);

        for (i = 0; i < vq->shadow_used_idx; i++) {
                uint16_t flags;

                if (vq->shadow_used_packed[i].len)
                        flags = VRING_DESC_F_WRITE;
                else
                        flags = 0;

                if (vq->used_wrap_counter) {
                        flags |= VRING_DESC_F_USED;
                        flags |= VRING_DESC_F_AVAIL;
                } else {
                        flags &= ~VRING_DESC_F_USED;
                        flags &= ~VRING_DESC_F_AVAIL;
                }

                if (i > 0) {
                        vq->desc_packed[vq->last_used_idx].flags = flags;

                        vhost_log_cache_used_vring(dev, vq,
                                        vq->last_used_idx *
                                        sizeof(struct vring_packed_desc),
                                        sizeof(struct vring_packed_desc));
                } else {
                        head_idx = vq->last_used_idx;
                        head_flags = flags;
                }

                vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
        }

        vq->desc_packed[head_idx].flags = head_flags;

        vhost_log_cache_used_vring(dev, vq,
                                head_idx *
                                sizeof(struct vring_packed_desc),
                                sizeof(struct vring_packed_desc));

        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq)
{
        struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];

        vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
        /* The desc flags field is the synchronization point for the packed vring */
        __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
                         used_elem->flags, __ATOMIC_RELEASE);

        vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc));
        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
                                 struct vhost_virtqueue *vq,
                                 uint64_t *lens,
                                 uint16_t *ids)
{
        uint16_t i;
        uint16_t flags;
        uint16_t last_used_idx;
        struct vring_packed_desc *desc_base;

        last_used_idx = vq->last_used_idx;
        desc_base = &vq->desc_packed[last_used_idx];

        flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                desc_base[i].id = ids[i];
                desc_base[i].len = lens[i];
        }

        rte_atomic_thread_fence(__ATOMIC_RELEASE);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                desc_base[i].flags = flags;
        }

        vhost_log_cache_used_vring(dev, vq, last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc) *
                                   PACKED_BATCH_SIZE);
        vhost_log_cache_sync(dev, vq);

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
                                          uint16_t id)
{
        vq->shadow_used_packed[0].id = id;

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].flags =
                        PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].count = 1;
                vq->shadow_used_idx++;
        }

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq,
                                  uint16_t *ids)
{
        uint16_t flags;
        uint16_t i;
        uint16_t begin;

        flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].id  = ids[0];
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].count = 1;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
                begin = 1;
        } else
                begin = 0;

        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
                vq->desc_packed[vq->last_used_idx + i].id = ids[i];
                vq->desc_packed[vq->last_used_idx + i].len = 0;
        }

        rte_atomic_thread_fence(__ATOMIC_RELEASE);
        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;

        vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc) *
                                   PACKED_BATCH_SIZE);
        vhost_log_cache_sync(dev, vq);

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
                                   uint16_t buf_id,
                                   uint16_t count)
{
        uint16_t flags;

        flags = vq->desc_packed[vq->last_used_idx].flags;
        if (vq->used_wrap_counter) {
                flags |= VRING_DESC_F_USED;
                flags |= VRING_DESC_F_AVAIL;
        } else {
                flags &= ~VRING_DESC_F_USED;
                flags &= ~VRING_DESC_F_AVAIL;
        }

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;

                vq->shadow_used_packed[0].id  = buf_id;
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
        } else {
                vq->desc_packed[vq->last_used_idx].id = buf_id;
                vq->desc_packed[vq->last_used_idx].len = 0;
                vq->desc_packed[vq->last_used_idx].flags = flags;
        }

        vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
                                           uint16_t buf_id,
                                           uint16_t count)
{
        uint16_t flags;

        vq->shadow_used_packed[0].id = buf_id;

        flags = vq->desc_packed[vq->last_used_idx].flags;
        if (vq->used_wrap_counter) {
                flags |= VRING_DESC_F_USED;
                flags |= VRING_DESC_F_AVAIL;
        } else {
                flags &= ~VRING_DESC_F_USED;
                flags &= ~VRING_DESC_F_AVAIL;
        }

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
        }

        vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
                                   uint32_t *len,
                                   uint16_t *id,
                                   uint16_t *count,
                                   uint16_t num_buffers)
{
        uint16_t i;

        for (i = 0; i < num_buffers; i++) {
                /* align the shadow flush point with the batch size */
                if (!vq->shadow_used_idx)
                        vq->shadow_aligned_idx = vq->last_used_idx &
                                PACKED_BATCH_MASK;
                vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
                vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
                vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
                vq->shadow_aligned_idx += count[i];
                vq->shadow_used_idx++;
        }
}

static __rte_always_inline void
vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
                                   struct vhost_virtqueue *vq,
                                   uint32_t *len,
                                   uint16_t *id,
                                   uint16_t *count,
                                   uint16_t num_buffers)
{
        vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);

        if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }
}

/* skip the write when the value is unchanged, to lessen cache pressure */
#define ASSIGN_UNLESS_EQUAL(var, val) do {      \
        if ((var) != (val))                     \
                (var) = (val);                  \
} while (0)

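/*
 * Translate the mbuf's TX offload flags into the virtio-net header the
 * guest will see: L4 checksum requests become VIRTIO_NET_HDR_F_NEEDS_CSUM
 * with the matching csum_start/csum_offset, and TSO/UFO requests become
 * the corresponding GSO type and segment size.
 */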
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
        uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;

        if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
                csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;

        if (csum_l4) {
                net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

                switch (csum_l4) {
                case RTE_MBUF_F_TX_TCP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
                                                cksum));
                        break;
                case RTE_MBUF_F_TX_UDP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
                                                dgram_cksum));
                        break;
                case RTE_MBUF_F_TX_SCTP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
                                                cksum));
                        break;
                }
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
        }

        /* IP cksum verification cannot be bypassed, so calculate it here */
        if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
                struct rte_ipv4_hdr *ipv4_hdr;

                ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
                                                   m_buf->l2_len);
                ipv4_hdr->hdr_checksum = 0;
                ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
        }

        if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
                        net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else
                        net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
                                        + m_buf->l4_len;
        } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
                net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
                        m_buf->l4_len;
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
        }
}

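/*
 * Map one guest descriptor into host virtual address space. A descriptor
 * whose IOVA range is not contiguous in the host mapping is split across
 * several buf_vec entries, one per contiguous chunk.
 */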
static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct buf_vector *buf_vec, uint16_t *vec_idx,
                uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
        uint16_t vec_id = *vec_idx;

        while (desc_len) {
                uint64_t desc_addr;
                uint64_t desc_chunck_len = desc_len;

                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;

                desc_addr = vhost_iova_to_vva(dev, vq,
                                desc_iova,
                                &desc_chunck_len,
                                perm);
                if (unlikely(!desc_addr))
                        return -1;

                rte_prefetch0((void *)(uintptr_t)desc_addr);

                buf_vec[vec_id].buf_iova = desc_iova;
                buf_vec[vec_id].buf_addr = desc_addr;
                buf_vec[vec_id].buf_len  = desc_chunck_len;

                desc_len -= desc_chunck_len;
                desc_iova += desc_chunck_len;
                vec_id++;
        }
        *vec_idx = vec_id;

        return 0;
}

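/*
 * Walk a split-ring descriptor chain starting from the given avail ring
 * slot, resolving an indirect descriptor table if present, and collect
 * the mapped buffers into buf_vec. The cnt guard bounds the walk to
 * vq->size descriptors so a malicious guest cannot make us loop forever.
 */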
static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                         uint32_t avail_idx, uint16_t *vec_idx,
                         struct buf_vector *buf_vec, uint16_t *desc_chain_head,
                         uint32_t *desc_chain_len, uint8_t perm)
{
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint16_t vec_id = *vec_idx;
        uint32_t len    = 0;
        uint64_t dlen;
        uint32_t nr_descs = vq->size;
        uint32_t cnt    = 0;
        struct vring_desc *descs = vq->desc;
        struct vring_desc *idesc = NULL;

        if (unlikely(idx >= vq->size))
                return -1;

        *desc_chain_head = idx;

        if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
                dlen = vq->desc[idx].len;
                nr_descs = dlen / sizeof(struct vring_desc);
                if (unlikely(nr_descs > vq->size))
                        return -1;

                descs = (struct vring_desc *)(uintptr_t)
                        vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
                                                &dlen,
                                                VHOST_ACCESS_RO);
                if (unlikely(!descs))
                        return -1;

                if (unlikely(dlen < vq->desc[idx].len)) {
                        /*
                         * The indirect desc table is not contiguous
                         * in process VA space, so we have to copy it.
                         */
                        idesc = vhost_alloc_copy_ind_table(dev, vq,
                                        vq->desc[idx].addr, vq->desc[idx].len);
                        if (unlikely(!idesc))
                                return -1;

                        descs = idesc;
                }

                idx = 0;
        }

        while (1) {
                if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
                        free_ind_table(idesc);
                        return -1;
                }

                dlen = descs[idx].len;
                len += dlen;

                if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                descs[idx].addr, dlen,
                                                perm))) {
                        free_ind_table(idesc);
                        return -1;
                }

                if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;

                idx = descs[idx].next;
        }

        *desc_chain_len = len;
        *vec_idx = vec_id;

        if (unlikely(!!idesc))
                free_ind_table(idesc);

        return 0;
}

/*
 * Reserve enough available buffers to hold `size` bytes, recording each
 * buffer head in the shadow used ring.
 * Returns -1 on failure, 0 on success.
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint32_t size, struct buf_vector *buf_vec,
                                uint16_t *num_buffers, uint16_t avail_head,
                                uint16_t *nr_vec)
{
        uint16_t cur_idx;
        uint16_t vec_idx = 0;
        uint16_t max_tries, tries = 0;

        uint16_t head_idx = 0;
        uint32_t len = 0;

        *num_buffers = 0;
        cur_idx  = vq->last_avail_idx;

        if (rxvq_is_mergeable(dev))
                max_tries = vq->size - 1;
        else
                max_tries = 1;

        while (size > 0) {
                if (unlikely(cur_idx == avail_head))
                        return -1;
                /*
                 * If we have tried all available ring items and still
                 * cannot get enough buffers, something abnormal has
                 * happened.
                 */
                if (unlikely(++tries > max_tries))
                        return -1;

                if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
                                                &vec_idx, buf_vec,
                                                &head_idx, &len,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;
                len = RTE_MIN(len, size);
                update_shadow_used_ring_split(vq, head_idx, len);
                size -= len;

                cur_idx++;
                *num_buffers += 1;
        }

        *nr_vec = vec_idx;

        return 0;
}

static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        struct vring_packed_desc *desc, uint16_t *vec_idx,
                        struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
{
        uint16_t i;
        uint32_t nr_descs;
        uint16_t vec_id = *vec_idx;
        uint64_t dlen;
        struct vring_packed_desc *descs, *idescs = NULL;

        dlen = desc->len;
        descs = (struct vring_packed_desc *)(uintptr_t)
                vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
        if (unlikely(!descs))
                return -1;

        if (unlikely(dlen < desc->len)) {
                /*
                 * The indirect desc table is not contiguous
                 * in process VA space, so we have to copy it.
                 */
                idescs = vhost_alloc_copy_ind_table(dev,
                                vq, desc->addr, desc->len);
                if (unlikely(!idescs))
                        return -1;

                descs = idescs;
        }

        nr_descs = desc->len / sizeof(struct vring_packed_desc);
        if (unlikely(nr_descs >= vq->size)) {
                free_ind_table(idescs);
                return -1;
        }

        for (i = 0; i < nr_descs; i++) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
                        free_ind_table(idescs);
                        return -1;
                }

                dlen = descs[i].len;
                *len += dlen;
                if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                descs[i].addr, dlen,
                                                perm)))
                        return -1;
        }
        *vec_idx = vec_id;

        if (unlikely(!!idescs))
                free_ind_table(idescs);

        return 0;
}

static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint16_t avail_idx, uint16_t *desc_count,
                                struct buf_vector *buf_vec, uint16_t *vec_idx,
                                uint16_t *buf_id, uint32_t *len, uint8_t perm)
{
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t vec_id = *vec_idx;
        uint64_t dlen;

        if (avail_idx < vq->last_avail_idx)
                wrap_counter ^= 1;

        /*
         * Perform a load-acquire barrier in desc_is_avail to
         * enforce the ordering between desc flags and desc
         * content.
         */
        if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
                return -1;

        *desc_count = 0;
        *len = 0;

        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;

                if (unlikely(*desc_count >= vq->size))
                        return -1;

                *desc_count += 1;
                *buf_id = descs[avail_idx].id;

                if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
                        if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
                                                        &descs[avail_idx],
                                                        &vec_id, buf_vec,
                                                        len, perm) < 0))
                                return -1;
                } else {
                        dlen = descs[avail_idx].len;
                        *len += dlen;

                        if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                        descs[avail_idx].addr,
                                                        dlen,
                                                        perm)))
                                return -1;
                }

                if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;

                if (++avail_idx >= vq->size) {
                        avail_idx -= vq->size;
                        wrap_counter ^= 1;
                }
        }

        *vec_idx = vec_id;

        return 0;
}

static __rte_noinline void
copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct buf_vector *buf_vec,
                struct virtio_net_hdr_mrg_rxbuf *hdr)
{
        uint64_t len;
        uint64_t remain = dev->vhost_hlen;
        uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
        uint64_t iova = buf_vec->buf_iova;

        while (remain) {
                len = RTE_MIN(remain,
                                buf_vec->buf_len);
                dst = buf_vec->buf_addr;
                rte_memcpy((void *)(uintptr_t)dst,
                                (void *)(uintptr_t)src,
                                len);

                PRINT_PACKET(dev, (uintptr_t)dst,
                                (uint32_t)len, 0);
                vhost_log_cache_write_iova(dev, vq,
                                iova, len);

                remain -= len;
                iova += len;
                src += len;
                buf_vec++;
        }
}

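/*
 * Copy one mbuf chain into the guest buffers described by buf_vec. The
 * virtio-net header is written first (through a bounce buffer when the
 * first descriptor is too small to hold it), then the payload is copied,
 * with small copies deferred through the batch_copy machinery.
 */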
static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                            struct rte_mbuf *m, struct buf_vector *buf_vec,
                            uint16_t nr_vec, uint16_t num_buffers)
{
        uint32_t vec_idx = 0;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t buf_offset, buf_avail;
        uint64_t buf_addr, buf_iova, buf_len;
        uint32_t cpy_len;
        uint64_t hdr_addr;
        struct rte_mbuf *hdr_mbuf;
        struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
        struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
        int error = 0;

        if (unlikely(m == NULL)) {
                error = -1;
                goto out;
        }

        buf_addr = buf_vec[vec_idx].buf_addr;
        buf_iova = buf_vec[vec_idx].buf_iova;
        buf_len = buf_vec[vec_idx].buf_len;

        if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
                error = -1;
                goto out;
        }

        hdr_mbuf = m;
        hdr_addr = buf_addr;
        if (unlikely(buf_len < dev->vhost_hlen)) {
                memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
                hdr = &tmp_hdr;
        } else
                hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

        VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
                dev->vid, num_buffers);

        if (unlikely(buf_len < dev->vhost_hlen)) {
                buf_offset = dev->vhost_hlen - buf_len;
                vec_idx++;
                buf_addr = buf_vec[vec_idx].buf_addr;
                buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
                buf_avail = buf_len - buf_offset;
        } else {
                buf_offset = dev->vhost_hlen;
                buf_avail = buf_len - dev->vhost_hlen;
        }

        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;
        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current buf, get the next one */
                if (buf_avail == 0) {
                        vec_idx++;
                        if (unlikely(vec_idx >= nr_vec)) {
                                error = -1;
                                goto out;
                        }

                        buf_addr = buf_vec[vec_idx].buf_addr;
                        buf_iova = buf_vec[vec_idx].buf_iova;
                        buf_len = buf_vec[vec_idx].buf_len;

                        buf_offset = 0;
                        buf_avail  = buf_len;
                }

                /* done with current mbuf, get the next one */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }

                if (hdr_addr) {
                        virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
                        if (rxvq_is_mergeable(dev))
                                ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
                                                num_buffers);

                        if (unlikely(hdr == &tmp_hdr)) {
                                copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
                        } else {
                                PRINT_PACKET(dev, (uintptr_t)hdr_addr,
                                                dev->vhost_hlen, 0);
                                vhost_log_cache_write_iova(dev, vq,
                                                buf_vec[0].buf_iova,
                                                dev->vhost_hlen);
                        }

                        hdr_addr = 0;
                }

                cpy_len = RTE_MIN(buf_avail, mbuf_avail);

                if (likely(cpy_len > MAX_BATCH_LEN ||
                                        vq->batch_copy_nb_elems >= vq->size)) {
                        rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
                                cpy_len);
                        vhost_log_cache_write_iova(dev, vq,
                                                   buf_iova + buf_offset,
                                                   cpy_len);
                        PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
                                cpy_len, 0);
                } else {
                        batch_copy[vq->batch_copy_nb_elems].dst =
                                (void *)((uintptr_t)(buf_addr + buf_offset));
                        batch_copy[vq->batch_copy_nb_elems].src =
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
                        batch_copy[vq->batch_copy_nb_elems].log_addr =
                                buf_iova + buf_offset;
                        batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
                        vq->batch_copy_nb_elems++;
                }

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                buf_avail  -= cpy_len;
                buf_offset += cpy_len;
        }

out:

        return error;
}

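/*
 * Helpers that build scatter-gather descriptions for asynchronous (DMA)
 * copies: an iov_iter groups the iovecs belonging to one packet, and a
 * partially built iterator can be cancelled as a unit when the packet
 * cannot be fully described.
 */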
static __rte_always_inline int
async_iter_initialize(struct vhost_async *async)
{
        struct rte_vhost_iov_iter *iter;

        if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
                VHOST_LOG_DATA(ERR, "no more async iovec available\n");
                return -1;
        }

        iter = async->iov_iter + async->iter_idx;
        iter->iov = async->iovec + async->iovec_idx;
        iter->nr_segs = 0;

        return 0;
}

static __rte_always_inline int
async_iter_add_iovec(struct vhost_async *async, void *src, void *dst, size_t len)
{
        struct rte_vhost_iov_iter *iter;
        struct rte_vhost_iovec *iovec;

        if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
                static bool vhost_max_async_vec_log;

                if (!vhost_max_async_vec_log) {
                        VHOST_LOG_DATA(ERR, "no more async iovec available\n");
                        vhost_max_async_vec_log = true;
                }

                return -1;
        }

        iter = async->iov_iter + async->iter_idx;
        iovec = async->iovec + async->iovec_idx;

        iovec->src_addr = src;
        iovec->dst_addr = dst;
        iovec->len = len;

        iter->nr_segs++;
        async->iovec_idx++;

        return 0;
}

static __rte_always_inline void
async_iter_finalize(struct vhost_async *async)
{
        async->iter_idx++;
}

static __rte_always_inline void
async_iter_cancel(struct vhost_async *async)
{
        struct rte_vhost_iov_iter *iter;

        iter = async->iov_iter + async->iter_idx;
        async->iovec_idx -= iter->nr_segs;
        iter->nr_segs = 0;
        iter->iov = NULL;
}

static __rte_always_inline void
async_iter_reset(struct vhost_async *async)
{
        async->iter_idx = 0;
        async->iovec_idx = 0;
}

static __rte_always_inline void
async_fill_descs(struct vhost_async *async, struct rte_vhost_async_desc *descs)
{
        int i;

        for (i = 0; i < async->iter_idx; i++)
                descs[i].iter = async->iov_iter + i;
}

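/*
 * Async counterpart of copy_mbuf_to_desc(): the virtio-net header is
 * still written synchronously, but payload copies are recorded as iovecs
 * (with host-physical destination addresses) to be handed to a DMA
 * engine instead of being copied by the CPU.
 */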
static __rte_always_inline int
async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        struct rte_mbuf *m, struct buf_vector *buf_vec,
                        uint16_t nr_vec, uint16_t num_buffers)
{
        struct vhost_async *async = vq->async;
        struct rte_mbuf *hdr_mbuf;
        struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
        uint64_t buf_addr, buf_iova;
        uint64_t hdr_addr;
        uint64_t mapped_len;
        uint32_t vec_idx = 0;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t buf_offset, buf_avail;
        uint32_t cpy_len, buf_len;

        void *hpa;

        if (unlikely(m == NULL))
                return -1;

        buf_addr = buf_vec[vec_idx].buf_addr;
        buf_iova = buf_vec[vec_idx].buf_iova;
        buf_len = buf_vec[vec_idx].buf_len;

        if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
                return -1;

        hdr_mbuf = m;
        hdr_addr = buf_addr;
        if (unlikely(buf_len < dev->vhost_hlen)) {
                memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
                hdr = &tmp_hdr;
        } else
                hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

        VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
                dev->vid, num_buffers);

        if (unlikely(buf_len < dev->vhost_hlen)) {
                buf_offset = dev->vhost_hlen - buf_len;
                vec_idx++;
                buf_addr = buf_vec[vec_idx].buf_addr;
                buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
                buf_avail = buf_len - buf_offset;
        } else {
                buf_offset = dev->vhost_hlen;
                buf_avail = buf_len - dev->vhost_hlen;
        }

        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;

        if (async_iter_initialize(async))
                return -1;

        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current buf, get the next one */
                if (buf_avail == 0) {
                        vec_idx++;
                        if (unlikely(vec_idx >= nr_vec))
                                goto error;

                        buf_addr = buf_vec[vec_idx].buf_addr;
                        buf_iova = buf_vec[vec_idx].buf_iova;
                        buf_len = buf_vec[vec_idx].buf_len;

                        buf_offset = 0;
                        buf_avail = buf_len;
                }

                /* done with current mbuf, get the next one */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail = rte_pktmbuf_data_len(m);
                }

                if (hdr_addr) {
                        virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
                        if (rxvq_is_mergeable(dev))
                                ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
                                                num_buffers);

                        if (unlikely(hdr == &tmp_hdr)) {
                                copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
                        } else {
                                PRINT_PACKET(dev, (uintptr_t)hdr_addr,
                                                dev->vhost_hlen, 0);
                                vhost_log_cache_write_iova(dev, vq,
                                                buf_vec[0].buf_iova,
                                                dev->vhost_hlen);
                        }

                        hdr_addr = 0;
                }

                cpy_len = RTE_MIN(buf_avail, mbuf_avail);

                while (unlikely(cpy_len)) {
                        hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
                                        buf_iova + buf_offset,
                                        cpy_len, &mapped_len);
                        if (unlikely(!hpa)) {
                                VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n",
                                        dev->vid, __func__);
                                goto error;
                        }

                        if (unlikely(async_iter_add_iovec(async,
                                        (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
                                                mbuf_offset),
                                        hpa, (size_t)mapped_len)))
                                goto error;

                        cpy_len -= (uint32_t)mapped_len;
                        mbuf_avail  -= (uint32_t)mapped_len;
                        mbuf_offset += (uint32_t)mapped_len;
                        buf_avail  -= (uint32_t)mapped_len;
                        buf_offset += (uint32_t)mapped_len;
                }
        }

        async_iter_finalize(async);

        return 0;
error:
        async_iter_cancel(async);

        return -1;
}

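/*
 * Reserve and fill the packed-ring buffers for a single packet, then
 * record the used entries in the shadow ring so they can be flushed in
 * batch-aligned groups.
 */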
static __rte_always_inline int
vhost_enqueue_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mbuf *pkt,
                            struct buf_vector *buf_vec,
                            uint16_t *nr_descs)
{
        uint16_t nr_vec = 0;
        uint16_t avail_idx = vq->last_avail_idx;
        uint16_t max_tries, tries = 0;
        uint16_t buf_id = 0;
        uint32_t len = 0;
        uint16_t desc_count;
        uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint16_t num_buffers = 0;
        uint32_t buffer_len[vq->size];
        uint16_t buffer_buf_id[vq->size];
        uint16_t buffer_desc_count[vq->size];

        if (rxvq_is_mergeable(dev))
                max_tries = vq->size - 1;
        else
                max_tries = 1;

        while (size > 0) {
                /*
                 * If we have tried all available ring items and still
                 * cannot get enough buffers, something abnormal has
                 * happened.
                 */
                if (unlikely(++tries > max_tries))
                        return -1;

                if (unlikely(fill_vec_buf_packed(dev, vq,
                                                avail_idx, &desc_count,
                                                buf_vec, &nr_vec,
                                                &buf_id, &len,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;

                len = RTE_MIN(len, size);
                size -= len;

                buffer_len[num_buffers] = len;
                buffer_buf_id[num_buffers] = buf_id;
                buffer_desc_count[num_buffers] = desc_count;
                num_buffers += 1;

                *nr_descs += desc_count;
                avail_idx += desc_count;
                if (avail_idx >= vq->size)
                        avail_idx -= vq->size;
        }

        if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
                return -1;

        vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
                                           buffer_desc_count, num_buffers);

        return 0;
}

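/*
 * Split-ring enqueue path: reserve buffers for each packet, copy the
 * packets in, then flush the shadow used ring and kick the guest once
 * at the end of the burst.
 */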
static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mbuf **pkts, uint32_t count)
{
        uint32_t pkt_idx = 0;
        uint16_t num_buffers;
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t avail_head;

        /*
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
        avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);

        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
                uint16_t nr_vec = 0;

                if (unlikely(reserve_avail_buf_split(dev, vq,
                                                pkt_len, buf_vec, &num_buffers,
                                                avail_head, &nr_vec) < 0)) {
                        VHOST_LOG_DATA(DEBUG,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }

                VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
                        dev->vid, vq->last_avail_idx,
                        vq->last_avail_idx + num_buffers);

                if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
                                                buf_vec, nr_vec,
                                                num_buffers) < 0) {
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }

                vq->last_avail_idx += num_buffers;
        }

        do_data_copy_enqueue(dev, vq);

        if (likely(vq->shadow_used_idx)) {
                flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call_split(dev, vq);
        }

        return pkt_idx;
}

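/*
 * Check whether a full batch of PACKED_BATCH_SIZE single-mbuf packets
 * can take the fast path: the avail index must be batch-aligned, all
 * descriptors available, each large enough for its packet, and each
 * contiguously mapped.
 */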
static __rte_always_inline int
virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts,
                           uint64_t *desc_addrs,
                           uint64_t *lens)
{
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t avail_idx = vq->last_avail_idx;
        uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint16_t i;

        if (unlikely(avail_idx & PACKED_BATCH_MASK))
                return -1;

        if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
                return -1;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(pkts[i]->next != NULL))
                        return -1;
                if (unlikely(!desc_is_avail(&descs[avail_idx + i],
                                            wrap_counter)))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                desc_addrs[i] = vhost_iova_to_vva(dev, vq,
                                                  descs[avail_idx + i].addr,
                                                  &lens[i],
                                                  VHOST_ACCESS_RW);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(!desc_addrs[i]))
                        return -1;
                if (unlikely(lens[i] != descs[avail_idx + i].len))
                        return -1;
        }

        return 0;
}

static __rte_always_inline void
virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts,
                           uint64_t *desc_addrs,
                           uint64_t *lens)
{
        uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t avail_idx = vq->last_avail_idx;
        uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
                hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
                                        (uintptr_t)desc_addrs[i];
                lens[i] = pkts[i]->pkt_len +
                        sizeof(struct virtio_net_hdr_mrg_rxbuf);
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);

        vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
                           rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
                           pkts[i]->pkt_len);
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
                                           lens[i]);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                ids[i] = descs[avail_idx + i].id;

        vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
}

static __rte_always_inline int
virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts)
{
        uint64_t desc_addrs[PACKED_BATCH_SIZE];
        uint64_t lens[PACKED_BATCH_SIZE];

        if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
                return -1;

        if (vq->shadow_used_idx) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }

        virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);

        return 0;
}

static __rte_always_inline int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mbuf *pkt)
{
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t nr_descs = 0;

        if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
                                                 &nr_descs) < 0)) {
                VHOST_LOG_DATA(DEBUG,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
                return -1;
        }

        VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
                        dev->vid, vq->last_avail_idx,
                        vq->last_avail_idx + nr_descs);

        vq_inc_last_avail_packed(vq, nr_descs);

        return 0;
}

static __rte_noinline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev,
                     struct vhost_virtqueue *__rte_restrict vq,
                     struct rte_mbuf **__rte_restrict pkts,
                     uint32_t count)
{
        uint32_t pkt_idx = 0;

        do {
                rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

                if (count - pkt_idx >= PACKED_BATCH_SIZE) {
                        if (!virtio_dev_rx_sync_batch_packed(dev, vq,
                                                        &pkts[pkt_idx])) {
                                pkt_idx += PACKED_BATCH_SIZE;
                                continue;
                        }
                }

                if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
                        break;
                pkt_idx++;

        } while (pkt_idx < count);

        if (vq->shadow_used_idx) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }

        if (pkt_idx)
                vhost_vring_call_packed(dev, vq);

        return pkt_idx;
}

1435 static __rte_always_inline uint32_t
1436 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1437         struct rte_mbuf **pkts, uint32_t count)
1438 {
1439         struct vhost_virtqueue *vq;
1440         uint32_t nb_tx = 0;
1441
1442         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1443         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1444                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1445                         dev->vid, __func__, queue_id);
1446                 return 0;
1447         }
1448
1449         vq = dev->virtqueue[queue_id];
1450
1451         rte_spinlock_lock(&vq->access_lock);
1452
1453         if (unlikely(!vq->enabled))
1454                 goto out_access_unlock;
1455
1456         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1457                 vhost_user_iotlb_rd_lock(vq);
1458
1459         if (unlikely(!vq->access_ok))
1460                 if (unlikely(vring_translate(dev, vq) < 0))
1461                         goto out;
1462
1463         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1464         if (count == 0)
1465                 goto out;
1466
1467         if (vq_is_packed(dev))
1468                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1469         else
1470                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1471
1472 out:
1473         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1474                 vhost_user_iotlb_rd_unlock(vq);
1475
1476 out_access_unlock:
1477         rte_spinlock_unlock(&vq->access_lock);
1478
1479         return nb_tx;
1480 }
1481
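/*
 * Illustrative usage sketch (not part of this file): a backend would
 * typically feed packets received from a NIC straight into the guest,
 * e.g. (port_id and vid are assumed to come from the application, and
 * virtqueue 0 is the guest RX queue):
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nr_rx;
 *
 *	nr_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
 *	rte_vhost_enqueue_burst(vid, 0, pkts, nr_rx);
 *	rte_pktmbuf_free_bulk(pkts, nr_rx); // data was copied, mbufs can go
 */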
1482 uint16_t
1483 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1484         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1485 {
1486         struct virtio_net *dev = get_device(vid);
1487
1488         if (!dev)
1489                 return 0;
1490
1491         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1492                 VHOST_LOG_DATA(ERR,
1493                         "(%d) %s: built-in vhost net backend is disabled.\n",
1494                         dev->vid, __func__);
1495                 return 0;
1496         }
1497
1498         return virtio_dev_rx(dev, queue_id, pkts, count);
1499 }
1500
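/*
 * Given the current write position and the number of packets still in
 * flight, compute the index of the oldest in-flight entry in the async
 * packet info ring.
 */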
1501 static __rte_always_inline uint16_t
1502 virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
1503         uint16_t vq_size, uint16_t n_inflight)
1504 {
1505         return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
1506                 (vq_size - n_inflight + pkts_idx) % vq_size;
1507 }
1508
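/*
 * Copy 'count' shadow used elements into the async descriptor ring,
 * splitting the copy in two when it wraps around the ring end. The
 * packed-ring variant below does the same for packed used elements.
 */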
1509 static __rte_always_inline void
1510 store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,
1511                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1512 {
1513         size_t elem_size = sizeof(struct vring_used_elem);
1514
1515         if (d_idx + count <= ring_size) {
1516                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1517         } else {
1518                 uint16_t size = ring_size - d_idx;
1519
1520                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1521                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1522         }
1523 }
1524
1525 static __rte_always_inline void
1526 store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
1527                 struct vring_used_elem_packed *d_ring,
1528                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1529 {
1530         size_t elem_size = sizeof(struct vring_used_elem_packed);
1531
1532         if (d_idx + count <= ring_size) {
1533                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1534         } else {
1535                 uint16_t size = ring_size - d_idx;
1536
1537                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1538                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1539         }
1540 }
1541
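/*
 * Asynchronous enqueue for split rings: reserve guest buffers, build the
 * DMA transfer descriptors, hand them to the async channel via
 * transfer_data(), and roll back the avail/shadow indexes for packets the
 * channel did not accept.
 */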
1542 static __rte_noinline uint32_t
1543 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
1544         struct vhost_virtqueue *vq, uint16_t queue_id,
1545         struct rte_mbuf **pkts, uint32_t count)
1546 {
1547         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1548         uint32_t pkt_idx = 0;
1549         uint16_t num_buffers;
1550         uint16_t avail_head;
1551
1552         struct vhost_async *async = vq->async;
1553         struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];
1554         struct async_inflight_info *pkts_info = async->pkts_info;
1555         uint32_t pkt_err = 0;
1556         int32_t n_xfer;
1557         uint16_t slot_idx = 0;
1558
1559         /*
1560          * The ordering between avail index and desc reads needs to be enforced.
1561          */
1562         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1563
1564         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1565
1566         async_iter_reset(async);
1567
1568         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1569                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1570                 uint16_t nr_vec = 0;
1571
1572                 if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
1573                                                 &num_buffers, avail_head, &nr_vec) < 0)) {
1574                         VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n",
1575                                         dev->vid);
1576                         vq->shadow_used_idx -= num_buffers;
1577                         break;
1578                 }
1579
1580                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1581                         dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
1582
1583                 if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers) < 0) {
1584                         vq->shadow_used_idx -= num_buffers;
1585                         break;
1586                 }
1587
1588                 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
1589                 pkts_info[slot_idx].descs = num_buffers;
1590                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1591
1592                 vq->last_avail_idx += num_buffers;
1593         }
1594
1595         if (unlikely(pkt_idx == 0))
1596                 return 0;
1597
1598         async_fill_descs(async, async_descs);
1599
1600         n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx);
1601         if (unlikely(n_xfer < 0)) {
1602                 VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
1603                                 dev->vid, __func__, queue_id);
1604                 n_xfer = 0;
1605         }
1606
1607         pkt_err = pkt_idx - n_xfer;
1608         if (unlikely(pkt_err)) {
1609                 uint16_t num_descs = 0;
1610
1611                 /* update number of completed packets */
1612                 pkt_idx = n_xfer;
1613
1614                 /* calculate the sum of descriptors to revert */
1615                 while (pkt_err-- > 0) {
1616                         num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1617                         slot_idx--;
1618                 }
1619
1620                 /* recover shadow used ring and available ring */
1621                 vq->shadow_used_idx -= num_descs;
1622                 vq->last_avail_idx -= num_descs;
1623         }
1624
1625         /* keep used descriptors */
1626         if (likely(vq->shadow_used_idx)) {
1627                 uint16_t to = async->desc_idx_split & (vq->size - 1);
1628
1629                 store_dma_desc_info_split(vq->shadow_used_split,
1630                                 async->descs_split, vq->size, 0, to,
1631                                 vq->shadow_used_idx);
1632
1633                 async->desc_idx_split += vq->shadow_used_idx;
1634                 async->pkts_idx += pkt_idx;
1635                 async->pkts_inflight_n += pkt_idx;
1636                 vq->shadow_used_idx = 0;
1637         }
1638
1639         return pkt_idx;
1640 }
1641
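/*
 * Write 'count' shadow used entries back to the packed descriptor ring.
 * Ids and lengths are stored first, then the flags are written after a
 * release fence, with the head descriptor flipped last so the guest never
 * sees a partially updated chain.
 */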
1642 static __rte_always_inline void
1643 vhost_update_used_packed(struct vhost_virtqueue *vq,
1644                         struct vring_used_elem_packed *shadow_ring,
1645                         uint16_t count)
1646 {
1647         int i;
1648         uint16_t used_idx = vq->last_used_idx;
1649         uint16_t head_idx = vq->last_used_idx;
1650         uint16_t head_flags = 0;
1651
1652         if (count == 0)
1653                 return;
1654
1655         /* Split loop in two to save memory barriers */
1656         for (i = 0; i < count; i++) {
1657                 vq->desc_packed[used_idx].id = shadow_ring[i].id;
1658                 vq->desc_packed[used_idx].len = shadow_ring[i].len;
1659
1660                 used_idx += shadow_ring[i].count;
1661                 if (used_idx >= vq->size)
1662                         used_idx -= vq->size;
1663         }
1664
1665         /* The ordering for storing desc flags needs to be enforced. */
1666         rte_atomic_thread_fence(__ATOMIC_RELEASE);
1667
1668         for (i = 0; i < count; i++) {
1669                 uint16_t flags;
1670
1671                 if (vq->shadow_used_packed[i].len)
1672                         flags = VRING_DESC_F_WRITE;
1673                 else
1674                         flags = 0;
1675
1676                 if (vq->used_wrap_counter) {
1677                         flags |= VRING_DESC_F_USED;
1678                         flags |= VRING_DESC_F_AVAIL;
1679                 } else {
1680                         flags &= ~VRING_DESC_F_USED;
1681                         flags &= ~VRING_DESC_F_AVAIL;
1682                 }
1683
1684                 if (i > 0) {
1685                         vq->desc_packed[vq->last_used_idx].flags = flags;
1686                 } else {
1687                         head_idx = vq->last_used_idx;
1688                         head_flags = flags;
1689                 }
1690
1691                 vq_inc_last_used_packed(vq, shadow_ring[i].count);
1692         }
1693
1694         vq->desc_packed[head_idx].flags = head_flags;
1695 }
1696
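/*
 * Reserve enough packed-ring buffers for one mbuf, record them in the
 * shadow used ring and build the async copy descriptors for it. Returns -1
 * when the ring cannot hold the packet.
 */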
1697 static __rte_always_inline int
1698 vhost_enqueue_async_packed(struct virtio_net *dev,
1699                             struct vhost_virtqueue *vq,
1700                             struct rte_mbuf *pkt,
1701                             struct buf_vector *buf_vec,
1702                             uint16_t *nr_descs,
1703                             uint16_t *nr_buffers)
1704 {
1705         uint16_t nr_vec = 0;
1706         uint16_t avail_idx = vq->last_avail_idx;
1707         uint16_t max_tries, tries = 0;
1708         uint16_t buf_id = 0;
1709         uint32_t len = 0;
1710         uint16_t desc_count = 0;
1711         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1712         uint32_t buffer_len[vq->size];
1713         uint16_t buffer_buf_id[vq->size];
1714         uint16_t buffer_desc_count[vq->size];
1715
1716         if (rxvq_is_mergeable(dev))
1717                 max_tries = vq->size - 1;
1718         else
1719                 max_tries = 1;
1720
1721         while (size > 0) {
1722                 /*
1723                  * If we have tried all available ring items and still
1724                  * cannot get enough buffers, something abnormal has
1725                  * happened.
1726                  */
1727                 if (unlikely(++tries > max_tries))
1728                         return -1;
1729
1730                 if (unlikely(fill_vec_buf_packed(dev, vq,
1731                                                 avail_idx, &desc_count,
1732                                                 buf_vec, &nr_vec,
1733                                                 &buf_id, &len,
1734                                                 VHOST_ACCESS_RW) < 0))
1735                         return -1;
1736
1737                 len = RTE_MIN(len, size);
1738                 size -= len;
1739
1740                 buffer_len[*nr_buffers] = len;
1741                 buffer_buf_id[*nr_buffers] = buf_id;
1742                 buffer_desc_count[*nr_buffers] = desc_count;
1743                 *nr_buffers += 1;
1744                 *nr_descs += desc_count;
1745                 avail_idx += desc_count;
1746                 if (avail_idx >= vq->size)
1747                         avail_idx -= vq->size;
1748         }
1749
1750         if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
1751                                         *nr_buffers) < 0))
1752                 return -1;
1753
1754         vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
1755
1756         return 0;
1757 }
1758
1759 static __rte_always_inline int16_t
1760 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1761                             struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
1762 {
1763         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1764
1765         if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
1766                                         nr_descs, nr_buffers) < 0)) {
1767                 VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
1768                 return -1;
1769         }
1770
1771         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1772                         dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
1773
1774         return 0;
1775 }
1776
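/*
 * Undo the bookkeeping for packets whose DMA transfer was not accepted:
 * walk the failed slots backwards, then rewind the avail index (handling
 * wrap) and the shadow used index accordingly.
 */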
1777 static __rte_always_inline void
1778 dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
1779                         uint32_t nr_err, uint32_t *pkt_idx)
1780 {
1781         uint16_t descs_err = 0;
1782         uint16_t buffers_err = 0;
1783         struct async_inflight_info *pkts_info = vq->async->pkts_info;
1784
1785         *pkt_idx -= nr_err;
1786         /* calculate the sum of buffers and descs of DMA-error packets. */
1787         while (nr_err-- > 0) {
1788                 descs_err += pkts_info[slot_idx % vq->size].descs;
1789                 buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
1790                 slot_idx--;
1791         }
1792
1793         if (vq->last_avail_idx >= descs_err) {
1794                 vq->last_avail_idx -= descs_err;
1795         } else {
1796                 vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
1797                 vq->avail_wrap_counter ^= 1;
1798         }
1799
1800         vq->shadow_used_idx -= buffers_err;
1801 }
1802
1803 static __rte_noinline uint32_t
1804 virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
1805         struct vhost_virtqueue *vq, uint16_t queue_id,
1806         struct rte_mbuf **pkts, uint32_t count)
1807 {
1808         uint32_t pkt_idx = 0;
1809         uint32_t remained = count;
1810         int32_t n_xfer;
1811         uint16_t num_buffers;
1812         uint16_t num_descs;
1813
1814         struct vhost_async *async = vq->async;
1815         struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];
1816         struct async_inflight_info *pkts_info = async->pkts_info;
1817         uint32_t pkt_err = 0;
1818         uint16_t slot_idx = 0;
1819
1820         do {
1821                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1822
1823                 num_buffers = 0;
1824                 num_descs = 0;
1825                 if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
1826                                                 &num_descs, &num_buffers) < 0))
1827                         break;
1828
1829                 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
1830
1831                 pkts_info[slot_idx].descs = num_descs;
1832                 pkts_info[slot_idx].nr_buffers = num_buffers;
1833                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1834
1835                 pkt_idx++;
1836                 remained--;
1837                 vq_inc_last_avail_packed(vq, num_descs);
1838         } while (pkt_idx < count);
1839
1840         if (unlikely(pkt_idx == 0))
1841                 return 0;
1842
1843         async_fill_descs(async, async_descs);
1844
1845         n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx);
1846         if (unlikely(n_xfer < 0)) {
1847                 VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
1848                                 dev->vid, __func__, queue_id);
1849                 n_xfer = 0;
1850         }
1851
1852         pkt_err = pkt_idx - n_xfer;
1853
1854         async_iter_reset(async);
1855
1856         if (unlikely(pkt_err))
1857                 dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
1858
1859         if (likely(vq->shadow_used_idx)) {
1860                 /* keep used descriptors. */
1861                 store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
1862                                         vq->size, 0, async->buffer_idx_packed,
1863                                         vq->shadow_used_idx);
1864
1865                 async->buffer_idx_packed += vq->shadow_used_idx;
1866                 if (async->buffer_idx_packed >= vq->size)
1867                         async->buffer_idx_packed -= vq->size;
1868
1869                 async->pkts_idx += pkt_idx;
1870                 if (async->pkts_idx >= vq->size)
1871                         async->pkts_idx -= vq->size;
1872
1873                 vq->shadow_used_idx = 0;
1874                 async->pkts_inflight_n += pkt_idx;
1875         }
1876
1877         return pkt_idx;
1878 }
1879
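/*
 * Copy completed async descriptors into the split used ring chunk by
 * chunk, handling wrap-around of both the async descriptor ring and the
 * used ring.
 */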
1880 static __rte_always_inline void
1881 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
1882 {
1883         struct vhost_async *async = vq->async;
1884         uint16_t nr_left = n_descs;
1885         uint16_t nr_copy;
1886         uint16_t to, from;
1887
1888         do {
1889                 from = async->last_desc_idx_split & (vq->size - 1);
1890                 nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
1891                 to = vq->last_used_idx & (vq->size - 1);
1892
1893                 if (to + nr_copy <= vq->size) {
1894                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1895                                         nr_copy * sizeof(struct vring_used_elem));
1896                 } else {
1897                         uint16_t size = vq->size - to;
1898
1899                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1900                                         size * sizeof(struct vring_used_elem));
1901                         rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
1902                                         (nr_copy - size) * sizeof(struct vring_used_elem));
1903                 }
1904
1905                 async->last_desc_idx_split += nr_copy;
1906                 vq->last_used_idx += nr_copy;
1907                 nr_left -= nr_copy;
1908         } while (nr_left > 0);
1909 }
1910
1911 static __rte_always_inline void
1912 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
1913                                 uint16_t n_buffers)
1914 {
1915         struct vhost_async *async = vq->async;
1916         uint16_t nr_left = n_buffers;
1917         uint16_t from, to;
1918
1919         do {
1920                 from = async->last_buffer_idx_packed;
1921                 to = (from + nr_left) % vq->size;
1922                 if (to > from) {
1923                         vhost_update_used_packed(vq, async->buffers_packed + from, to - from);
1924                         async->last_buffer_idx_packed += nr_left;
1925                         nr_left = 0;
1926                 } else {
1927                         vhost_update_used_packed(vq, async->buffers_packed + from,
1928                                 vq->size - from);
1929                         async->last_buffer_idx_packed = 0;
1930                         nr_left -= vq->size - from;
1931                 }
1932         } while (nr_left > 0);
1933 }
1934
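/*
 * Poll the async channel for completed copies, return the completed mbufs
 * to the caller and, when the ring is accessible, write back the used
 * descriptors and notify the guest.
 */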
1935 static __rte_always_inline uint16_t
1936 vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
1937                 struct rte_mbuf **pkts, uint16_t count)
1938 {
1939         struct vhost_virtqueue *vq;
1940         struct vhost_async *async;
1941         struct async_inflight_info *pkts_info;
1942         int32_t n_cpl;
1943         uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
1944         uint16_t start_idx, pkts_idx, vq_size;
1945         uint16_t from, i;
1946
1947         vq = dev->virtqueue[queue_id];
1948         async = vq->async;
1949         pkts_idx = async->pkts_idx % vq->size;
1950         pkts_info = async->pkts_info;
1951         vq_size = vq->size;
1952         start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
1953                 vq_size, async->pkts_inflight_n);
1954
1955         if (count > async->last_pkts_n) {
1956                 n_cpl = async->ops.check_completed_copies(dev->vid,
1957                         queue_id, 0, count - async->last_pkts_n);
1958                 if (likely(n_cpl >= 0)) {
1959                         n_pkts_cpl = n_cpl;
1960                 } else {
1961                         VHOST_LOG_DATA(ERR,
1962                                 "(%d) %s: failed to check completed copies for queue id %d.\n",
1963                                 dev->vid, __func__, queue_id);
1964                         n_pkts_cpl = 0;
1965                 }
1966         }
1967
1968         n_pkts_cpl += async->last_pkts_n;
1969         n_pkts_put = RTE_MIN(n_pkts_cpl, count);
1970         if (unlikely(n_pkts_put == 0)) {
1971                 async->last_pkts_n = n_pkts_cpl;
1972                 return 0;
1973         }
1974
1975         if (vq_is_packed(dev)) {
1976                 for (i = 0; i < n_pkts_put; i++) {
1977                         from = (start_idx + i) % vq_size;
1978                         n_buffers += pkts_info[from].nr_buffers;
1979                         pkts[i] = pkts_info[from].mbuf;
1980                 }
1981         } else {
1982                 for (i = 0; i < n_pkts_put; i++) {
1983                         from = (start_idx + i) & (vq_size - 1);
1984                         n_descs += pkts_info[from].descs;
1985                         pkts[i] = pkts_info[from].mbuf;
1986                 }
1987         }
1988         async->last_pkts_n = n_pkts_cpl - n_pkts_put;
1989         async->pkts_inflight_n -= n_pkts_put;
1990
1991         if (likely(vq->enabled && vq->access_ok)) {
1992                 if (vq_is_packed(dev)) {
1993                         write_back_completed_descs_packed(vq, n_buffers);
1994
1995                         vhost_vring_call_packed(dev, vq);
1996                 } else {
1997                         write_back_completed_descs_split(vq, n_descs);
1998
1999                         __atomic_add_fetch(&vq->used->idx, n_descs,
2000                                         __ATOMIC_RELEASE);
2001                         vhost_vring_call_split(dev, vq);
2002                 }
2003         } else {
2004                 if (vq_is_packed(dev)) {
2005                         async->last_buffer_idx_packed += n_buffers;
2006                         if (async->last_buffer_idx_packed >= vq->size)
2007                                 async->last_buffer_idx_packed -= vq->size;
2008                 } else {
2009                         async->last_desc_idx_split += n_descs;
2010                 }
2011         }
2012
2013         return n_pkts_put;
2014 }
2015
2016 uint16_t
2017 rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
2018                 struct rte_mbuf **pkts, uint16_t count)
2019 {
2020         struct virtio_net *dev = get_device(vid);
2021         struct vhost_virtqueue *vq;
2022         uint16_t n_pkts_cpl = 0;
2023
2024         if (unlikely(!dev))
2025                 return 0;
2026
2027         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2028         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2029                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2030                         dev->vid, __func__, queue_id);
2031                 return 0;
2032         }
2033
2034         vq = dev->virtqueue[queue_id];
2035
2036         if (unlikely(!vq->async)) {
2037                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
2038                         dev->vid, __func__, queue_id);
2039                 return 0;
2040         }
2041
2042         rte_spinlock_lock(&vq->access_lock);
2043
2044         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
2045
2046         rte_spinlock_unlock(&vq->access_lock);
2047
2048         return n_pkts_cpl;
2049 }
2050
2051 uint16_t
2052 rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
2053                 struct rte_mbuf **pkts, uint16_t count)
2054 {
2055         struct virtio_net *dev = get_device(vid);
2056         struct vhost_virtqueue *vq;
2057         uint16_t n_pkts_cpl = 0;
2058
2059         if (!dev)
2060                 return 0;
2061
2062         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2063         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2064                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2065                         dev->vid, __func__, queue_id);
2066                 return 0;
2067         }
2068
2069         vq = dev->virtqueue[queue_id];
2070
2071         if (unlikely(!vq->async)) {
2072                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
2073                         dev->vid, __func__, queue_id);
2074                 return 0;
2075         }
2076
2077         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
2078
2079         return n_pkts_cpl;
2080 }
2081
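/*
 * Common async enqueue entry point: validate the queue, make sure an async
 * channel is registered, then dispatch to the packed or split submit
 * routine under the access and IOTLB locks.
 */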
2082 static __rte_always_inline uint32_t
2083 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
2084         struct rte_mbuf **pkts, uint32_t count)
2085 {
2086         struct vhost_virtqueue *vq;
2087         uint32_t nb_tx = 0;
2088
2089         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2090         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2091                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2092                         dev->vid, __func__, queue_id);
2093                 return 0;
2094         }
2095
2096         vq = dev->virtqueue[queue_id];
2097
2098         rte_spinlock_lock(&vq->access_lock);
2099
2100         if (unlikely(!vq->enabled || !vq->async))
2101                 goto out_access_unlock;
2102
2103         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2104                 vhost_user_iotlb_rd_lock(vq);
2105
2106         if (unlikely(!vq->access_ok))
2107                 if (unlikely(vring_translate(dev, vq) < 0))
2108                         goto out;
2109
2110         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
2111         if (count == 0)
2112                 goto out;
2113
2114         if (vq_is_packed(dev))
2115                 nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
2116                                 pkts, count);
2117         else
2118                 nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
2119                                 pkts, count);
2120
2121 out:
2122         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2123                 vhost_user_iotlb_rd_unlock(vq);
2124
2125 out_access_unlock:
2126         rte_spinlock_unlock(&vq->access_lock);
2127
2128         return nb_tx;
2129 }
2130
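/*
 * Illustrative usage sketch (not part of this file): the async submit call
 * is expected to be paired with completion polling, and the mbufs stay
 * owned by vhost until they show up in the completed burst, e.g. (vid,
 * queue_id and the packet arrays are assumed to come from the caller):
 *
 *	n_enq = rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, n);
 *	// ... later, typically from the same lcore ...
 *	n_cpl = rte_vhost_poll_enqueue_completed(vid, queue_id, cpl,
 *						MAX_PKT_BURST);
 *	rte_pktmbuf_free_bulk(cpl, n_cpl);
 */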
2131 uint16_t
2132 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
2133                 struct rte_mbuf **pkts, uint16_t count)
2134 {
2135         struct virtio_net *dev = get_device(vid);
2136
2137         if (!dev)
2138                 return 0;
2139
2140         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2141                 VHOST_LOG_DATA(ERR,
2142                         "(%d) %s: built-in vhost net backend is disabled.\n",
2143                         dev->vid, __func__);
2144                 return 0;
2145         }
2146
2147         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
2148 }
2149
2150 static inline bool
2151 virtio_net_with_host_offload(struct virtio_net *dev)
2152 {
2153         if (dev->features &
2154                         ((1ULL << VIRTIO_NET_F_CSUM) |
2155                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
2156                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2157                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
2158                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
2159                 return true;
2160
2161         return false;
2162 }
2163
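/*
 * Parse the Ethernet, IP and L4 headers of an mbuf coming from the guest:
 * set l2_len/l3_len and the Tx IPv4/IPv6 offload flags, report the L4
 * protocol, and return -EINVAL (clearing the offload state) for truncated
 * or unsupported packets.
 */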
2164 static int
2165 parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
2166 {
2167         struct rte_ipv4_hdr *ipv4_hdr;
2168         struct rte_ipv6_hdr *ipv6_hdr;
2169         struct rte_ether_hdr *eth_hdr;
2170         uint16_t ethertype;
2171         uint16_t data_len = rte_pktmbuf_data_len(m);
2172
2173         if (data_len < sizeof(struct rte_ether_hdr))
2174                 return -EINVAL;
2175
2176         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
2177
2178         m->l2_len = sizeof(struct rte_ether_hdr);
2179         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
2180
2181         if (ethertype == RTE_ETHER_TYPE_VLAN) {
2182                 if (data_len < sizeof(struct rte_ether_hdr) +
2183                                 sizeof(struct rte_vlan_hdr))
2184                         goto error;
2185
2186                 struct rte_vlan_hdr *vlan_hdr =
2187                         (struct rte_vlan_hdr *)(eth_hdr + 1);
2188
2189                 m->l2_len += sizeof(struct rte_vlan_hdr);
2190                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
2191         }
2192
2193         switch (ethertype) {
2194         case RTE_ETHER_TYPE_IPV4:
2195                 if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
2196                         goto error;
2197                 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2198                                 m->l2_len);
2199                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
2200                 if (data_len < m->l2_len + m->l3_len)
2201                         goto error;
2202                 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
2203                 *l4_proto = ipv4_hdr->next_proto_id;
2204                 break;
2205         case RTE_ETHER_TYPE_IPV6:
2206                 if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
2207                         goto error;
2208                 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
2209                                 m->l2_len);
2210                 m->l3_len = sizeof(struct rte_ipv6_hdr);
2211                 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
2212                 *l4_proto = ipv6_hdr->proto;
2213                 break;
2214         default:
2215                 /* a valid L3 header is needed for further L4 parsing */
2216                 goto error;
2217         }
2218
2219         /* both CSUM and GSO need a valid L4 header */
2220         switch (*l4_proto) {
2221         case IPPROTO_TCP:
2222                 if (data_len < m->l2_len + m->l3_len +
2223                                 sizeof(struct rte_tcp_hdr))
2224                         goto error;
2225                 break;
2226         case IPPROTO_UDP:
2227                 if (data_len < m->l2_len + m->l3_len +
2228                                 sizeof(struct rte_udp_hdr))
2229                         goto error;
2230                 break;
2231         case IPPROTO_SCTP:
2232                 if (data_len < m->l2_len + m->l3_len +
2233                                 sizeof(struct rte_sctp_hdr))
2234                         goto error;
2235                 break;
2236         default:
2237                 goto error;
2238         }
2239
2240         return 0;
2241
2242 error:
2243         m->l2_len = 0;
2244         m->l3_len = 0;
2245         m->ol_flags = 0;
2246         return -EINVAL;
2247 }
2248
2249 static __rte_always_inline void
2250 vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
2251 {
2252         uint8_t l4_proto = 0;
2253         struct rte_tcp_hdr *tcp_hdr = NULL;
2254         uint16_t tcp_len;
2255         uint16_t data_len = rte_pktmbuf_data_len(m);
2256
2257         if (parse_headers(m, &l4_proto) < 0)
2258                 return;
2259
2260         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2261                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
2262                         switch (hdr->csum_offset) {
2263                         case (offsetof(struct rte_tcp_hdr, cksum)):
2264                                 if (l4_proto != IPPROTO_TCP)
2265                                         goto error;
2266                                 m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
2267                                 break;
2268                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
2269                                 if (l4_proto != IPPROTO_UDP)
2270                                         goto error;
2271                                 m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
2272                                 break;
2273                         case (offsetof(struct rte_sctp_hdr, cksum)):
2274                                 if (l4_proto != IPPROTO_SCTP)
2275                                         goto error;
2276                                 m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
2277                                 break;
2278                         default:
2279                                 goto error;
2280                         }
2281                 } else {
2282                         goto error;
2283                 }
2284         }
2285
2286         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2287                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2288                 case VIRTIO_NET_HDR_GSO_TCPV4:
2289                 case VIRTIO_NET_HDR_GSO_TCPV6:
2290                         if (l4_proto != IPPROTO_TCP)
2291                                 goto error;
2292                         tcp_hdr = rte_pktmbuf_mtod_offset(m,
2293                                         struct rte_tcp_hdr *,
2294                                         m->l2_len + m->l3_len);
2295                         tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
2296                         if (data_len < m->l2_len + m->l3_len + tcp_len)
2297                                 goto error;
2298                         m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
2299                         m->tso_segsz = hdr->gso_size;
2300                         m->l4_len = tcp_len;
2301                         break;
2302                 case VIRTIO_NET_HDR_GSO_UDP:
2303                         if (l4_proto != IPPROTO_UDP)
2304                                 goto error;
2305                         m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
2306                         m->tso_segsz = hdr->gso_size;
2307                         m->l4_len = sizeof(struct rte_udp_hdr);
2308                         break;
2309                 default:
2310                         VHOST_LOG_DATA(WARNING,
2311                                 "unsupported gso type %u.\n", hdr->gso_type);
2312                         goto error;
2313                 }
2314         }
2315         return;
2316
2317 error:
2318         m->l2_len = 0;
2319         m->l3_len = 0;
2320         m->ol_flags = 0;
2321 }
2322
2323 static __rte_always_inline void
2324 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,
2325         bool legacy_ol_flags)
2326 {
2327         struct rte_net_hdr_lens hdr_lens;
2328         int l4_supported = 0;
2329         uint32_t ptype;
2330
2331         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
2332                 return;
2333
2334         if (legacy_ol_flags) {
2335                 vhost_dequeue_offload_legacy(hdr, m);
2336                 return;
2337         }
2338
2339         m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
2340
2341         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
2342         m->packet_type = ptype;
2343         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
2344             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
2345             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
2346                 l4_supported = 1;
2347
2348         /* According to the Virtio 1.1 spec, the device only needs to look at
2349          * VIRTIO_NET_HDR_F_NEEDS_CSUM in the packet transmission path.
2350          * This differs from the incoming packet processing path, where the
2351          * driver could rely on the VIRTIO_NET_HDR_F_DATA_VALID flag set by the
2352          * device.
2353          *
2354          * 5.1.6.2.1 Driver Requirements: Packet Transmission
2355          * The driver MUST NOT set the VIRTIO_NET_HDR_F_DATA_VALID and
2356          * VIRTIO_NET_HDR_F_RSC_INFO bits in flags.
2357          *
2358          * 5.1.6.2.2 Device Requirements: Packet Transmission
2359          * The device MUST ignore flag bits that it does not recognize.
2360          */
2361         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2362                 uint32_t hdrlen;
2363
2364                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
2365                 if (hdr->csum_start <= hdrlen && l4_supported != 0) {
2366                         m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
2367                 } else {
2368                         /* Unknown protocol or tunnelled packet: fall back to a
2369                          * software checksum. We can assume the checksum field is
2370                          * in the first segment since the buffers we provided to
2371                          * the host are large enough. In case of SCTP this will be
2372                          * wrong since it is a CRC, but there is nothing we can do.
2373                          */
2374                         uint16_t csum = 0, off;
2375
2376                         if (rte_raw_cksum_mbuf(m, hdr->csum_start,
2377                                         rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0)
2378                                 return;
2379                         if (likely(csum != 0xffff))
2380                                 csum = ~csum;
2381                         off = hdr->csum_offset + hdr->csum_start;
2382                         if (rte_pktmbuf_data_len(m) >= off + 1)
2383                                 *rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
2384                 }
2385         }
2386
2387         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2388                 if (hdr->gso_size == 0)
2389                         return;
2390
2391                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2392                 case VIRTIO_NET_HDR_GSO_TCPV4:
2393                 case VIRTIO_NET_HDR_GSO_TCPV6:
2394                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
2395                                 break;
2396                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2397                         m->tso_segsz = hdr->gso_size;
2398                         break;
2399                 case VIRTIO_NET_HDR_GSO_UDP:
2400                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
2401                                 break;
2402                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2403                         m->tso_segsz = hdr->gso_size;
2404                         break;
2405                 default:
2406                         break;
2407                 }
2408         }
2409 }
2410
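/*
 * Gather a virtio-net header that is split across several descriptors into
 * the caller-provided structure.
 */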
2411 static __rte_noinline void
2412 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
2413                 struct buf_vector *buf_vec)
2414 {
2415         uint64_t len;
2416         uint64_t remain = sizeof(struct virtio_net_hdr);
2417         uint64_t src;
2418         uint64_t dst = (uint64_t)(uintptr_t)hdr;
2419
2420         while (remain) {
2421                 len = RTE_MIN(remain, buf_vec->buf_len);
2422                 src = buf_vec->buf_addr;
2423                 rte_memcpy((void *)(uintptr_t)dst,
2424                                 (void *)(uintptr_t)src, len);
2425
2426                 remain -= len;
2427                 dst += len;
2428                 buf_vec++;
2429         }
2430 }
2431
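/*
 * Copy one guest buffer chain into an mbuf chain, skipping the virtio-net
 * header, batching small copies where possible and applying the dequeue
 * offloads described by that header.
 */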
2432 static __rte_always_inline int
2433 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
2434                   struct buf_vector *buf_vec, uint16_t nr_vec,
2435                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
2436                   bool legacy_ol_flags)
2437 {
2438         uint32_t buf_avail, buf_offset;
2439         uint64_t buf_addr, buf_len;
2440         uint32_t mbuf_avail, mbuf_offset;
2441         uint32_t cpy_len;
2442         struct rte_mbuf *cur = m, *prev = m;
2443         struct virtio_net_hdr tmp_hdr;
2444         struct virtio_net_hdr *hdr = NULL;
2445         /* A counter to avoid an endless descriptor chain loop */
2446         uint16_t vec_idx = 0;
2447         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
2448         int error = 0;
2449
2450         buf_addr = buf_vec[vec_idx].buf_addr;
2451         buf_len = buf_vec[vec_idx].buf_len;
2452
2453         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
2454                 error = -1;
2455                 goto out;
2456         }
2457
2458         if (virtio_net_with_host_offload(dev)) {
2459                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
2460                         /*
2461                          * No luck, the virtio-net header doesn't fit
2462                          * in a contiguous virtual area.
2463                          */
2464                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
2465                         hdr = &tmp_hdr;
2466                 } else {
2467                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
2468                 }
2469         }
2470
2471         /*
2472          * A virtio driver normally uses at least 2 desc buffers
2473          * for Tx: the first for storing the header, and the others
2474          * for storing the data.
2475          */
2476         if (unlikely(buf_len < dev->vhost_hlen)) {
2477                 buf_offset = dev->vhost_hlen - buf_len;
2478                 vec_idx++;
2479                 buf_addr = buf_vec[vec_idx].buf_addr;
2480                 buf_len = buf_vec[vec_idx].buf_len;
2481                 buf_avail  = buf_len - buf_offset;
2482         } else if (buf_len == dev->vhost_hlen) {
2483                 if (unlikely(++vec_idx >= nr_vec))
2484                         goto out;
2485                 buf_addr = buf_vec[vec_idx].buf_addr;
2486                 buf_len = buf_vec[vec_idx].buf_len;
2487
2488                 buf_offset = 0;
2489                 buf_avail = buf_len;
2490         } else {
2491                 buf_offset = dev->vhost_hlen;
2492                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
2493         }
2494
2495         PRINT_PACKET(dev,
2496                         (uintptr_t)(buf_addr + buf_offset),
2497                         (uint32_t)buf_avail, 0);
2498
2499         mbuf_offset = 0;
2500         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
2501         while (1) {
2502                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
2503
2504                 if (likely(cpy_len > MAX_BATCH_LEN ||
2505                                         vq->batch_copy_nb_elems >= vq->size ||
2506                                         (hdr && cur == m))) {
2507                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2508                                                 mbuf_offset),
2509                                         (void *)((uintptr_t)(buf_addr +
2510                                                         buf_offset)), cpy_len);
2511                 } else {
2512                         batch_copy[vq->batch_copy_nb_elems].dst =
2513                                 rte_pktmbuf_mtod_offset(cur, void *,
2514                                                 mbuf_offset);
2515                         batch_copy[vq->batch_copy_nb_elems].src =
2516                                 (void *)((uintptr_t)(buf_addr + buf_offset));
2517                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
2518                         vq->batch_copy_nb_elems++;
2519                 }
2520
2521                 mbuf_avail  -= cpy_len;
2522                 mbuf_offset += cpy_len;
2523                 buf_avail -= cpy_len;
2524                 buf_offset += cpy_len;
2525
2526                 /* This buffer has reached its end, get the next one */
2527                 if (buf_avail == 0) {
2528                         if (++vec_idx >= nr_vec)
2529                                 break;
2530
2531                         buf_addr = buf_vec[vec_idx].buf_addr;
2532                         buf_len = buf_vec[vec_idx].buf_len;
2533
2534                         buf_offset = 0;
2535                         buf_avail  = buf_len;
2536
2537                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2538                                         (uint32_t)buf_avail, 0);
2539                 }
2540
2541                 /*
2542                  * This mbuf has reached its end, allocate a new one
2543                  * to hold more data.
2544                  */
2545                 if (mbuf_avail == 0) {
2546                         cur = rte_pktmbuf_alloc(mbuf_pool);
2547                         if (unlikely(cur == NULL)) {
2548                                 VHOST_LOG_DATA(ERR, "Failed to "
2549                                         "allocate memory for mbuf.\n");
2550                                 error = -1;
2551                                 goto out;
2552                         }
2553
2554                         prev->next = cur;
2555                         prev->data_len = mbuf_offset;
2556                         m->nb_segs += 1;
2557                         m->pkt_len += mbuf_offset;
2558                         prev = cur;
2559
2560                         mbuf_offset = 0;
2561                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2562                 }
2563         }
2564
2565         prev->data_len = mbuf_offset;
2566         m->pkt_len    += mbuf_offset;
2567
2568         if (hdr)
2569                 vhost_dequeue_offload(hdr, m, legacy_ol_flags);
2570
2571 out:
2572
2573         return error;
2574 }
2575
2576 static void
2577 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2578 {
2579         rte_free(opaque);
2580 }
2581
2582 static int
2583 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
2584 {
2585         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2586         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2587         uint16_t buf_len;
2588         rte_iova_t iova;
2589         void *buf;
2590
2591         total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2592         total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2593
2594         if (unlikely(total_len > UINT16_MAX))
2595                 return -ENOSPC;
2596
2597         buf_len = total_len;
2598         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2599         if (unlikely(buf == NULL))
2600                 return -ENOMEM;
2601
2602         /* Initialize shinfo */
2603         shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2604                                                 virtio_dev_extbuf_free, buf);
2605         if (unlikely(shinfo == NULL)) {
2606                 rte_free(buf);
2607                 VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
2608                 return -1;
2609         }
2610
2611         iova = rte_malloc_virt2iova(buf);
2612         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2613         rte_pktmbuf_reset_headroom(pkt);
2614
2615         return 0;
2616 }
2617
2618 /*
2619  * Prepare a pktmbuf able to receive 'data_len' bytes, honouring the device's extbuf/linearbuf settings.
2620  */
2621 static __rte_always_inline int
2622 virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
2623                          uint32_t data_len)
2624 {
2625         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2626                 return 0;
2627
2628         /* attach an external buffer if supported */
2629         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
2630                 return 0;
2631
2632         /* check if chained buffers are allowed */
2633         if (!dev->linearbuf)
2634                 return 0;
2635
2636         return -1;
2637 }
2638
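/*
 * Dequeue up to 'count' packets from a split ring: allocate the mbufs in
 * bulk, copy each descriptor chain, then flush the shadow used ring and
 * kick the guest.
 */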
2639 __rte_always_inline
2640 static uint16_t
2641 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2642         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
2643         bool legacy_ol_flags)
2644 {
2645         uint16_t i;
2646         uint16_t free_entries;
2647         uint16_t dropped = 0;
2648         static bool allocerr_warned;
2649
2650         /*
2651          * The ordering between avail index and
2652          * desc reads needs to be enforced.
2653          */
2654         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2655                         vq->last_avail_idx;
2656         if (free_entries == 0)
2657                 return 0;
2658
2659         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2660
2661         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2662
2663         count = RTE_MIN(count, MAX_PKT_BURST);
2664         count = RTE_MIN(count, free_entries);
2665         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
2666                         dev->vid, count);
2667
2668         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2669                 return 0;
2670
2671         for (i = 0; i < count; i++) {
2672                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2673                 uint16_t head_idx;
2674                 uint32_t buf_len;
2675                 uint16_t nr_vec = 0;
2676                 int err;
2677
2678                 if (unlikely(fill_vec_buf_split(dev, vq,
2679                                                 vq->last_avail_idx + i,
2680                                                 &nr_vec, buf_vec,
2681                                                 &head_idx, &buf_len,
2682                                                 VHOST_ACCESS_RO) < 0))
2683                         break;
2684
2685                 update_shadow_used_ring_split(vq, head_idx, 0);
2686
2687                 err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len);
2688                 if (unlikely(err)) {
2689                         /*
2690                          * mbuf allocation fails for jumbo packets when external
2691                          * buffer allocation is not allowed and a linear buffer
2692                          * is required. Drop this packet.
2693                          */
2694                         if (!allocerr_warned) {
2695                                 VHOST_LOG_DATA(ERR,
2696                                         "Failed mbuf alloc of size %d from %s on %s.\n",
2697                                         buf_len, mbuf_pool->name, dev->ifname);
2698                                 allocerr_warned = true;
2699                         }
2700                         dropped += 1;
2701                         i++;
2702                         break;
2703                 }
2704
2705                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2706                                 mbuf_pool, legacy_ol_flags);
2707                 if (unlikely(err)) {
2708                         if (!allocerr_warned) {
2709                                 VHOST_LOG_DATA(ERR,
2710                                         "Failed to copy desc to mbuf on %s.\n",
2711                                         dev->ifname);
2712                                 allocerr_warned = true;
2713                         }
2714                         dropped += 1;
2715                         i++;
2716                         break;
2717                 }
2718         }
2719
2720         if (dropped)
2721                 rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
2722
2723         vq->last_avail_idx += i;
2724
2725         do_data_copy_dequeue(vq);
2726         if (unlikely(i < count))
2727                 vq->shadow_used_idx = i;
2728         if (likely(vq->shadow_used_idx)) {
2729                 flush_shadow_used_ring_split(dev, vq);
2730                 vhost_vring_call_split(dev, vq);
2731         }
2732
2733         return (i - dropped);
2734 }
2735
2736 __rte_noinline
2737 static uint16_t
2738 virtio_dev_tx_split_legacy(struct virtio_net *dev,
2739         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2740         struct rte_mbuf **pkts, uint16_t count)
2741 {
2742         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
2743 }
2744
2745 __rte_noinline
2746 static uint16_t
2747 virtio_dev_tx_split_compliant(struct virtio_net *dev,
2748         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2749         struct rte_mbuf **pkts, uint16_t count)
2750 {
2751         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
2752 }
2753
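/*
 * Check whether the next PACKED_BATCH_SIZE descriptors can be dequeued as
 * one batch (available, single-descriptor, mappable and fitting in the
 * prepared mbufs); on success return their addresses and ids.
 */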
2754 static __rte_always_inline int
2755 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2756                                  struct vhost_virtqueue *vq,
2757                                  struct rte_mbuf **pkts,
2758                                  uint16_t avail_idx,
2759                                  uintptr_t *desc_addrs,
2760                                  uint16_t *ids)
2761 {
2762         bool wrap = vq->avail_wrap_counter;
2763         struct vring_packed_desc *descs = vq->desc_packed;
2764         uint64_t lens[PACKED_BATCH_SIZE];
2765         uint64_t buf_lens[PACKED_BATCH_SIZE];
2766         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2767         uint16_t flags, i;
2768
2769         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2770                 return -1;
2771         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2772                 return -1;
2773
2774         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2775                 flags = descs[avail_idx + i].flags;
2776                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2777                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2778                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2779                         return -1;
2780         }
2781
2782         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2783
2784         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2785                 lens[i] = descs[avail_idx + i].len;
2786
2787         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2788                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2789                                                   descs[avail_idx + i].addr,
2790                                                   &lens[i], VHOST_ACCESS_RW);
2791         }
2792
2793         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2794                 if (unlikely(!desc_addrs[i]))
2795                         return -1;
2796                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2797                         return -1;
2798         }
2799
2800         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2801                 if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
2802                         goto err;
2803         }
2804
2805         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2806                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2807
2808         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2809                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2810                         goto err;
2811         }
2812
2813         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2814                 pkts[i]->pkt_len = lens[i] - buf_offset;
2815                 pkts[i]->data_len = pkts[i]->pkt_len;
2816                 ids[i] = descs[avail_idx + i].id;
2817         }
2818
2819         return 0;
2820
2821 err:
2822         return -1;
2823 }
2824
static __rte_always_inline int
virtio_dev_tx_batch_packed(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts,
                           bool legacy_ol_flags)
{
        uint16_t avail_idx = vq->last_avail_idx;
        uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        struct virtio_net_hdr *hdr;
        uintptr_t desc_addrs[PACKED_BATCH_SIZE];
        uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;

        if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
                                             desc_addrs, ids))
                return -1;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
                           (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
                           pkts[i]->pkt_len);

        if (virtio_net_with_host_offload(dev)) {
                vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                        hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
                        vhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);
                }
        }

        if (virtio_net_is_inorder(dev))
                vhost_shadow_dequeue_batch_packed_inorder(vq,
                        ids[PACKED_BATCH_SIZE - 1]);
        else
                vhost_shadow_dequeue_batch_packed(dev, vq, ids);

        vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

        return 0;
}

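/*
 * Slow-path dequeue of a single descriptor chain: gather its buffers into
 * buf_vec, prepare an mbuf large enough for the payload and copy the data
 * over. Allocation and copy failures are logged only once, to avoid
 * flooding the log when a mempool is exhausted.
 */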
static __rte_always_inline int
vhost_dequeue_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mempool *mbuf_pool,
                            struct rte_mbuf *pkts,
                            uint16_t *buf_id,
                            uint16_t *desc_count,
                            bool legacy_ol_flags)
{
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint32_t buf_len;
        uint16_t nr_vec = 0;
        int err;
        static bool allocerr_warned;

        if (unlikely(fill_vec_buf_packed(dev, vq,
                                         vq->last_avail_idx, desc_count,
                                         buf_vec, &nr_vec,
                                         buf_id, &buf_len,
                                         VHOST_ACCESS_RO) < 0))
                return -1;

        if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
                if (!allocerr_warned) {
                        VHOST_LOG_DATA(ERR,
                                "Failed mbuf alloc of size %u from %s on %s.\n",
                                buf_len, mbuf_pool->name, dev->ifname);
                        allocerr_warned = true;
                }
                return -1;
        }

        err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
                                mbuf_pool, legacy_ol_flags);
        if (unlikely(err)) {
                if (!allocerr_warned) {
                        VHOST_LOG_DATA(ERR,
                                "Failed to copy desc to mbuf on %s.\n",
                                dev->ifname);
                        allocerr_warned = true;
                }
                return -1;
        }

        return 0;
}

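/*
 * Dequeue one packet on the slow path and record the freed descriptors in
 * the shadow used ring, keeping in-order completion when the feature was
 * negotiated.
 */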
static __rte_always_inline int
virtio_dev_tx_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mempool *mbuf_pool,
                            struct rte_mbuf *pkts,
                            bool legacy_ol_flags)
{
        uint16_t buf_id, desc_count = 0;
        int ret;

        ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
                                          &desc_count, legacy_ol_flags);

        if (likely(desc_count > 0)) {
                if (virtio_net_is_inorder(dev))
                        vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
                                                                   desc_count);
                else
                        vhost_shadow_dequeue_single_packed(vq, buf_id,
                                        desc_count);

                vq_inc_last_avail_packed(vq, desc_count);
        }

        return ret;
}

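/*
 * Packed-ring dequeue loop: mbufs for the whole burst are allocated up
 * front, the batched path is tried first with the single-descriptor path
 * as fallback, unused mbufs are returned to the pool, and finally the
 * shadow used ring is flushed and the guest is notified.
 */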
__rte_always_inline
static uint16_t
virtio_dev_tx_packed(struct virtio_net *dev,
                     struct vhost_virtqueue *__rte_restrict vq,
                     struct rte_mempool *mbuf_pool,
                     struct rte_mbuf **__rte_restrict pkts,
                     uint32_t count,
                     bool legacy_ol_flags)
{
        uint32_t pkt_idx = 0;

        if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
                return 0;

        do {
                rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

                if (count - pkt_idx >= PACKED_BATCH_SIZE) {
                        if (!virtio_dev_tx_batch_packed(dev, vq,
                                                        &pkts[pkt_idx],
                                                        legacy_ol_flags)) {
                                pkt_idx += PACKED_BATCH_SIZE;
                                continue;
                        }
                }

                if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
                                                pkts[pkt_idx],
                                                legacy_ol_flags))
                        break;
                pkt_idx++;
        } while (pkt_idx < count);

        if (pkt_idx != count)
                rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);

        if (vq->shadow_used_idx) {
                do_data_copy_dequeue(vq);

                vhost_flush_dequeue_shadow_packed(dev, vq);
                vhost_vring_call_packed(dev, vq);
        }

        return pkt_idx;
}

__rte_noinline
static uint16_t
virtio_dev_tx_packed_legacy(struct virtio_net *dev,
        struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
        struct rte_mbuf **__rte_restrict pkts, uint32_t count)
{
        return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
}

__rte_noinline
static uint16_t
virtio_dev_tx_packed_compliant(struct virtio_net *dev,
        struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
        struct rte_mbuf **__rte_restrict pkts, uint32_t count)
{
        return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
}

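/*
 * Public dequeue entry point: receive up to "count" packets sent by the
 * guest on virtqueue "queue_id" into "pkts". A minimal polling-loop
 * sketch (BURST_SIZE and the surrounding variables are the caller's,
 * shown for illustration only):
 *
 *     struct rte_mbuf *pkts[BURST_SIZE];
 *     uint16_t i, n;
 *
 *     n = rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool,
 *                                 pkts, BURST_SIZE);
 *     for (i = 0; i < n; i++)
 *             rte_pktmbuf_free(pkts[i]); (after processing the packet)
 */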
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
        struct virtio_net *dev;
        struct rte_mbuf *rarp_mbuf = NULL;
        struct vhost_virtqueue *vq;
        int16_t success = 1;

        dev = get_device(vid);
        if (!dev)
                return 0;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                VHOST_LOG_DATA(ERR,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return 0;
        }

        if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
                VHOST_LOG_DATA(ERR,
                        "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
        }

        vq = dev->virtqueue[queue_id];

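        /*
         * Take the access lock with trylock so the datapath returns 0
         * instead of blocking while a control-path operation holds the
         * lock for reconfiguration.
         */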
        if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
                return 0;

        if (unlikely(!vq->enabled)) {
                count = 0;
                goto out_access_unlock;
        }

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_rd_lock(vq);

        if (unlikely(!vq->access_ok))
                if (unlikely(vring_translate(dev, vq) < 0)) {
                        count = 0;
                        goto out;
                }

        /*
         * Construct a RARP broadcast packet and inject it into the "pkts"
         * array, to make it look like the guest actually sent such a
         * packet.
         *
         * Check user_send_rarp() for more information.
         *
         * broadcast_rarp shares a cacheline in the virtio_net structure
         * with some fields that are accessed during enqueue, and
         * __atomic_compare_exchange_n causes a write when it performs the
         * compare-and-exchange. This could result in false sharing between
         * enqueue and dequeue.
         *
         * Prevent unnecessary false sharing by reading broadcast_rarp
         * first, and only performing the compare-and-exchange if the read
         * indicates it is likely to be set.
         */
        if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
                        __atomic_compare_exchange_n(&dev->broadcast_rarp,
                        &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {

                rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
                if (rarp_mbuf == NULL) {
                        VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
                        count = 0;
                        goto out;
                }
                /*
                 * Inject it at the head of the "pkts" array, so that the
                 * switch's MAC learning table gets updated first.
                 */
                pkts[0] = rarp_mbuf;
                pkts++;
                count -= 1;
        }

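        /*
         * Dispatch on the ring layout (packed vs split) and on whether the
         * legacy mbuf offload flag semantics were requested at
         * registration time.
         */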
        if (vq_is_packed(dev)) {
                if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
                        count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
                else
                        count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
        } else {
                if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
                        count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
                else
                        count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
        }

out:
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
        rte_spinlock_unlock(&vq->access_lock);

        if (unlikely(rarp_mbuf != NULL))
                count += 1;

        return count;
}