vhost: prepare async for mbuf to descriptor refactoring
[dpdk.git] / lib / vhost / virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_net.h>
12 #include <rte_ether.h>
13 #include <rte_ip.h>
14 #include <rte_vhost.h>
15 #include <rte_tcp.h>
16 #include <rte_udp.h>
17 #include <rte_sctp.h>
18 #include <rte_arp.h>
19 #include <rte_spinlock.h>
20 #include <rte_malloc.h>
21 #include <rte_vhost_async.h>
22
23 #include "iotlb.h"
24 #include "vhost.h"
25
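/*
 * Copies of at most MAX_BATCH_LEN bytes are not performed immediately but
 * queued in vq->batch_copy_elems (unless the list is already full) and
 * carried out later by do_data_copy_enqueue() or do_data_copy_dequeue().
 */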
26 #define MAX_BATCH_LEN 256
27
28 static  __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static  __rte_always_inline bool
35 virtio_net_is_inorder(struct virtio_net *dev)
36 {
37         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
38 }
39
40 static bool
41 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
42 {
43         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
44 }
45
46 static inline void
47 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
48 {
49         struct batch_copy_elem *elem = vq->batch_copy_elems;
50         uint16_t count = vq->batch_copy_nb_elems;
51         int i;
52
53         for (i = 0; i < count; i++) {
54                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
55                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
56                                            elem[i].len);
57                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
58         }
59
60         vq->batch_copy_nb_elems = 0;
61 }
62
63 static inline void
64 do_data_copy_dequeue(struct vhost_virtqueue *vq)
65 {
66         struct batch_copy_elem *elem = vq->batch_copy_elems;
67         uint16_t count = vq->batch_copy_nb_elems;
68         int i;
69
70         for (i = 0; i < count; i++)
71                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
72
73         vq->batch_copy_nb_elems = 0;
74 }
75
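/*
 * Used ring updates for the split ring are first accumulated in
 * vq->shadow_used_split and written back to the guest-visible used ring in
 * one go by flush_shadow_used_ring_split(), which also publishes used->idx
 * with release semantics.
 */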
76 static __rte_always_inline void
77 do_flush_shadow_used_ring_split(struct virtio_net *dev,
78                         struct vhost_virtqueue *vq,
79                         uint16_t to, uint16_t from, uint16_t size)
80 {
81         rte_memcpy(&vq->used->ring[to],
82                         &vq->shadow_used_split[from],
83                         size * sizeof(struct vring_used_elem));
84         vhost_log_cache_used_vring(dev, vq,
85                         offsetof(struct vring_used, ring[to]),
86                         size * sizeof(struct vring_used_elem));
87 }
88
89 static __rte_always_inline void
90 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
91 {
92         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
93
94         if (used_idx + vq->shadow_used_idx <= vq->size) {
95                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
96                                           vq->shadow_used_idx);
97         } else {
98                 uint16_t size;
99
100                 /* flush the used ring interval [used_idx, vq->size) */
101                 size = vq->size - used_idx;
102                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
103
104                 /* then flush the remaining interval [0, shadow_used_idx - size) */
105                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
106                                           vq->shadow_used_idx - size);
107         }
108         vq->last_used_idx += vq->shadow_used_idx;
109
110         vhost_log_cache_sync(dev, vq);
111
112         __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
113                            __ATOMIC_RELEASE);
114         vq->shadow_used_idx = 0;
115         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
116                 sizeof(vq->used->idx));
117 }
118
119 static __rte_always_inline void
120 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
121                          uint16_t desc_idx, uint32_t len)
122 {
123         uint16_t i = vq->shadow_used_idx++;
124
125         vq->shadow_used_split[i].id  = desc_idx;
126         vq->shadow_used_split[i].len = len;
127 }
128
129 static __rte_always_inline void
130 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
131                                   struct vhost_virtqueue *vq)
132 {
133         int i;
134         uint16_t used_idx = vq->last_used_idx;
135         uint16_t head_idx = vq->last_used_idx;
136         uint16_t head_flags = 0;
137
138         /* Split loop in two to save memory barriers */
139         for (i = 0; i < vq->shadow_used_idx; i++) {
140                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
141                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
142
143                 used_idx += vq->shadow_used_packed[i].count;
144                 if (used_idx >= vq->size)
145                         used_idx -= vq->size;
146         }
147
148         /* The ordering for storing desc flags needs to be enforced. */
149         rte_atomic_thread_fence(__ATOMIC_RELEASE);
150
151         for (i = 0; i < vq->shadow_used_idx; i++) {
152                 uint16_t flags;
153
154                 if (vq->shadow_used_packed[i].len)
155                         flags = VRING_DESC_F_WRITE;
156                 else
157                         flags = 0;
158
159                 if (vq->used_wrap_counter) {
160                         flags |= VRING_DESC_F_USED;
161                         flags |= VRING_DESC_F_AVAIL;
162                 } else {
163                         flags &= ~VRING_DESC_F_USED;
164                         flags &= ~VRING_DESC_F_AVAIL;
165                 }
166
167                 if (i > 0) {
168                         vq->desc_packed[vq->last_used_idx].flags = flags;
169
170                         vhost_log_cache_used_vring(dev, vq,
171                                         vq->last_used_idx *
172                                         sizeof(struct vring_packed_desc),
173                                         sizeof(struct vring_packed_desc));
174                 } else {
175                         head_idx = vq->last_used_idx;
176                         head_flags = flags;
177                 }
178
179                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
180         }
181
182         vq->desc_packed[head_idx].flags = head_flags;
183
184         vhost_log_cache_used_vring(dev, vq,
185                                 head_idx *
186                                 sizeof(struct vring_packed_desc),
187                                 sizeof(struct vring_packed_desc));
188
189         vq->shadow_used_idx = 0;
190         vhost_log_cache_sync(dev, vq);
191 }
192
193 static __rte_always_inline void
194 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
195                                   struct vhost_virtqueue *vq)
196 {
197         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
198
199         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
200         /* desc flags are the synchronization point for the virtio packed ring */
201         __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
202                          used_elem->flags, __ATOMIC_RELEASE);
203
204         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
205                                    sizeof(struct vring_packed_desc),
206                                    sizeof(struct vring_packed_desc));
207         vq->shadow_used_idx = 0;
208         vhost_log_cache_sync(dev, vq);
209 }
210
211 static __rte_always_inline void
212 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
213                                  struct vhost_virtqueue *vq,
214                                  uint64_t *lens,
215                                  uint16_t *ids)
216 {
217         uint16_t i;
218         uint16_t flags;
219         uint16_t last_used_idx;
220         struct vring_packed_desc *desc_base;
221
222         last_used_idx = vq->last_used_idx;
223         desc_base = &vq->desc_packed[last_used_idx];
224
225         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
226
227         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
228                 desc_base[i].id = ids[i];
229                 desc_base[i].len = lens[i];
230         }
231
232         rte_atomic_thread_fence(__ATOMIC_RELEASE);
233
234         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
235                 desc_base[i].flags = flags;
236         }
237
238         vhost_log_cache_used_vring(dev, vq, last_used_idx *
239                                    sizeof(struct vring_packed_desc),
240                                    sizeof(struct vring_packed_desc) *
241                                    PACKED_BATCH_SIZE);
242         vhost_log_cache_sync(dev, vq);
243
244         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
245 }
246
247 static __rte_always_inline void
248 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
249                                           uint16_t id)
250 {
251         vq->shadow_used_packed[0].id = id;
252
253         if (!vq->shadow_used_idx) {
254                 vq->shadow_last_used_idx = vq->last_used_idx;
255                 vq->shadow_used_packed[0].flags =
256                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
257                 vq->shadow_used_packed[0].len = 0;
258                 vq->shadow_used_packed[0].count = 1;
259                 vq->shadow_used_idx++;
260         }
261
262         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
263 }
264
265 static __rte_always_inline void
266 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
267                                   struct vhost_virtqueue *vq,
268                                   uint16_t *ids)
269 {
270         uint16_t flags;
271         uint16_t i;
272         uint16_t begin;
273
274         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
275
276         if (!vq->shadow_used_idx) {
277                 vq->shadow_last_used_idx = vq->last_used_idx;
278                 vq->shadow_used_packed[0].id  = ids[0];
279                 vq->shadow_used_packed[0].len = 0;
280                 vq->shadow_used_packed[0].count = 1;
281                 vq->shadow_used_packed[0].flags = flags;
282                 vq->shadow_used_idx++;
283                 begin = 1;
284         } else
285                 begin = 0;
286
287         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
288                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
289                 vq->desc_packed[vq->last_used_idx + i].len = 0;
290         }
291
292         rte_atomic_thread_fence(__ATOMIC_RELEASE);
293         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
294                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
295
296         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
297                                    sizeof(struct vring_packed_desc),
298                                    sizeof(struct vring_packed_desc) *
299                                    PACKED_BATCH_SIZE);
300         vhost_log_cache_sync(dev, vq);
301
302         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
303 }
304
305 static __rte_always_inline void
306 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
307                                    uint16_t buf_id,
308                                    uint16_t count)
309 {
310         uint16_t flags;
311
312         flags = vq->desc_packed[vq->last_used_idx].flags;
313         if (vq->used_wrap_counter) {
314                 flags |= VRING_DESC_F_USED;
315                 flags |= VRING_DESC_F_AVAIL;
316         } else {
317                 flags &= ~VRING_DESC_F_USED;
318                 flags &= ~VRING_DESC_F_AVAIL;
319         }
320
321         if (!vq->shadow_used_idx) {
322                 vq->shadow_last_used_idx = vq->last_used_idx;
323
324                 vq->shadow_used_packed[0].id  = buf_id;
325                 vq->shadow_used_packed[0].len = 0;
326                 vq->shadow_used_packed[0].flags = flags;
327                 vq->shadow_used_idx++;
328         } else {
329                 vq->desc_packed[vq->last_used_idx].id = buf_id;
330                 vq->desc_packed[vq->last_used_idx].len = 0;
331                 vq->desc_packed[vq->last_used_idx].flags = flags;
332         }
333
334         vq_inc_last_used_packed(vq, count);
335 }
336
337 static __rte_always_inline void
338 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
339                                            uint16_t buf_id,
340                                            uint16_t count)
341 {
342         uint16_t flags;
343
344         vq->shadow_used_packed[0].id = buf_id;
345
346         flags = vq->desc_packed[vq->last_used_idx].flags;
347         if (vq->used_wrap_counter) {
348                 flags |= VRING_DESC_F_USED;
349                 flags |= VRING_DESC_F_AVAIL;
350         } else {
351                 flags &= ~VRING_DESC_F_USED;
352                 flags &= ~VRING_DESC_F_AVAIL;
353         }
354
355         if (!vq->shadow_used_idx) {
356                 vq->shadow_last_used_idx = vq->last_used_idx;
357                 vq->shadow_used_packed[0].len = 0;
358                 vq->shadow_used_packed[0].flags = flags;
359                 vq->shadow_used_idx++;
360         }
361
362         vq_inc_last_used_packed(vq, count);
363 }
364
365 static __rte_always_inline void
366 vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
367                                    uint32_t *len,
368                                    uint16_t *id,
369                                    uint16_t *count,
370                                    uint16_t num_buffers)
371 {
372         uint16_t i;
373
374         for (i = 0; i < num_buffers; i++) {
375                 /* keep the enqueue shadow flush aligned to the packed batch size */
376                 if (!vq->shadow_used_idx)
377                         vq->shadow_aligned_idx = vq->last_used_idx &
378                                 PACKED_BATCH_MASK;
379                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
380                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
381                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
382                 vq->shadow_aligned_idx += count[i];
383                 vq->shadow_used_idx++;
384         }
385 }
386
387 static __rte_always_inline void
388 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
389                                    struct vhost_virtqueue *vq,
390                                    uint32_t *len,
391                                    uint16_t *id,
392                                    uint16_t *count,
393                                    uint16_t num_buffers)
394 {
395         vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
396
397         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
398                 do_data_copy_enqueue(dev, vq);
399                 vhost_flush_enqueue_shadow_packed(dev, vq);
400         }
401 }
402
403 /* skip the write when the value is already set, to lessen cache pressure */
404 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
405         if ((var) != (val))                     \
406                 (var) = (val);                  \
407 } while (0)
408
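/*
 * Translate the offload requests carried by the mbuf (L4 checksum, TSO/UFO)
 * into the virtio_net_hdr fields read by the guest. The IPv4 header checksum
 * is computed in software because the virtio header has no way to request it.
 */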
409 static __rte_always_inline void
410 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
411 {
412         uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
413
414         if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
415                 csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
416
417         if (csum_l4) {
418                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
419                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
420
421                 switch (csum_l4) {
422                 case RTE_MBUF_F_TX_TCP_CKSUM:
423                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
424                                                 cksum));
425                         break;
426                 case RTE_MBUF_F_TX_UDP_CKSUM:
427                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
428                                                 dgram_cksum));
429                         break;
430                 case RTE_MBUF_F_TX_SCTP_CKSUM:
431                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
432                                                 cksum));
433                         break;
434                 }
435         } else {
436                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
437                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
438                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
439         }
440
441         /* The virtio header cannot request IP checksum offload, so compute it here */
442         if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
443                 struct rte_ipv4_hdr *ipv4_hdr;
444
445                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
446                                                    m_buf->l2_len);
447                 ipv4_hdr->hdr_checksum = 0;
448                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
449         }
450
451         if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
452                 if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
453                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
454                 else
455                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
456                 net_hdr->gso_size = m_buf->tso_segsz;
457                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
458                                         + m_buf->l4_len;
459         } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
460                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
461                 net_hdr->gso_size = m_buf->tso_segsz;
462                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
463                         m_buf->l4_len;
464         } else {
465                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
466                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
467                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
468         }
469 }
470
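/*
 * Translate one descriptor's guest IOVA range into host virtual addresses.
 * A single guest range may map to several host-contiguous chunks, each of
 * which gets its own buf_vec entry.
 */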
471 static __rte_always_inline int
472 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
473                 struct buf_vector *buf_vec, uint16_t *vec_idx,
474                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
475 {
476         uint16_t vec_id = *vec_idx;
477
478         while (desc_len) {
479                 uint64_t desc_addr;
480                 uint64_t desc_chunck_len = desc_len;
481
482                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
483                         return -1;
484
485                 desc_addr = vhost_iova_to_vva(dev, vq,
486                                 desc_iova,
487                                 &desc_chunck_len,
488                                 perm);
489                 if (unlikely(!desc_addr))
490                         return -1;
491
492                 rte_prefetch0((void *)(uintptr_t)desc_addr);
493
494                 buf_vec[vec_id].buf_iova = desc_iova;
495                 buf_vec[vec_id].buf_addr = desc_addr;
496                 buf_vec[vec_id].buf_len  = desc_chunck_len;
497
498                 desc_len -= desc_chunck_len;
499                 desc_iova += desc_chunck_len;
500                 vec_id++;
501         }
502         *vec_idx = vec_id;
503
504         return 0;
505 }
506
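/*
 * Walk one split-ring descriptor chain (following indirect tables when
 * present) and fill buf_vec with the host-VA view of every buffer in the
 * chain, returning the chain head index and its total length to the caller.
 */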
507 static __rte_always_inline int
508 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
509                          uint32_t avail_idx, uint16_t *vec_idx,
510                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
511                          uint32_t *desc_chain_len, uint8_t perm)
512 {
513         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
514         uint16_t vec_id = *vec_idx;
515         uint32_t len    = 0;
516         uint64_t dlen;
517         uint32_t nr_descs = vq->size;
518         uint32_t cnt    = 0;
519         struct vring_desc *descs = vq->desc;
520         struct vring_desc *idesc = NULL;
521
522         if (unlikely(idx >= vq->size))
523                 return -1;
524
525         *desc_chain_head = idx;
526
527         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
528                 dlen = vq->desc[idx].len;
529                 nr_descs = dlen / sizeof(struct vring_desc);
530                 if (unlikely(nr_descs > vq->size))
531                         return -1;
532
533                 descs = (struct vring_desc *)(uintptr_t)
534                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
535                                                 &dlen,
536                                                 VHOST_ACCESS_RO);
537                 if (unlikely(!descs))
538                         return -1;
539
540                 if (unlikely(dlen < vq->desc[idx].len)) {
541                         /*
542                          * The indirect desc table is not contiguous
543                          * in the process VA space, so we have to copy it.
544                          */
545                         idesc = vhost_alloc_copy_ind_table(dev, vq,
546                                         vq->desc[idx].addr, vq->desc[idx].len);
547                         if (unlikely(!idesc))
548                                 return -1;
549
550                         descs = idesc;
551                 }
552
553                 idx = 0;
554         }
555
556         while (1) {
557                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
558                         free_ind_table(idesc);
559                         return -1;
560                 }
561
562                 dlen = descs[idx].len;
563                 len += dlen;
564
565                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
566                                                 descs[idx].addr, dlen,
567                                                 perm))) {
568                         free_ind_table(idesc);
569                         return -1;
570                 }
571
572                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
573                         break;
574
575                 idx = descs[idx].next;
576         }
577
578         *desc_chain_len = len;
579         *vec_idx = vec_id;
580
581         if (unlikely(!!idesc))
582                 free_ind_table(idesc);
583
584         return 0;
585 }
586
587 /*
588  * Returns -1 on failure, 0 on success
589  */
590 static inline int
591 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
592                                 uint32_t size, struct buf_vector *buf_vec,
593                                 uint16_t *num_buffers, uint16_t avail_head,
594                                 uint16_t *nr_vec)
595 {
596         uint16_t cur_idx;
597         uint16_t vec_idx = 0;
598         uint16_t max_tries, tries = 0;
599
600         uint16_t head_idx = 0;
601         uint32_t len = 0;
602
603         *num_buffers = 0;
604         cur_idx  = vq->last_avail_idx;
605
606         if (rxvq_is_mergeable(dev))
607                 max_tries = vq->size - 1;
608         else
609                 max_tries = 1;
610
611         while (size > 0) {
612                 if (unlikely(cur_idx == avail_head))
613                         return -1;
614                 /*
615                  * If we have tried all available ring entries and still
616                  * cannot get enough buffers, something abnormal has
617                  * happened.
618                  */
619                 if (unlikely(++tries > max_tries))
620                         return -1;
621
622                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
623                                                 &vec_idx, buf_vec,
624                                                 &head_idx, &len,
625                                                 VHOST_ACCESS_RW) < 0))
626                         return -1;
627                 len = RTE_MIN(len, size);
628                 update_shadow_used_ring_split(vq, head_idx, len);
629                 size -= len;
630
631                 cur_idx++;
632                 *num_buffers += 1;
633         }
634
635         *nr_vec = vec_idx;
636
637         return 0;
638 }
639
640 static __rte_always_inline int
641 fill_vec_buf_packed_indirect(struct virtio_net *dev,
642                         struct vhost_virtqueue *vq,
643                         struct vring_packed_desc *desc, uint16_t *vec_idx,
644                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
645 {
646         uint16_t i;
647         uint32_t nr_descs;
648         uint16_t vec_id = *vec_idx;
649         uint64_t dlen;
650         struct vring_packed_desc *descs, *idescs = NULL;
651
652         dlen = desc->len;
653         descs = (struct vring_packed_desc *)(uintptr_t)
654                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
655         if (unlikely(!descs))
656                 return -1;
657
658         if (unlikely(dlen < desc->len)) {
659                 /*
660                  * The indirect desc table is not contiguous
661                  * in the process VA space, so we have to copy it.
662                  */
663                 idescs = vhost_alloc_copy_ind_table(dev,
664                                 vq, desc->addr, desc->len);
665                 if (unlikely(!idescs))
666                         return -1;
667
668                 descs = idescs;
669         }
670
671         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
672         if (unlikely(nr_descs >= vq->size)) {
673                 free_ind_table(idescs);
674                 return -1;
675         }
676
677         for (i = 0; i < nr_descs; i++) {
678                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
679                         free_ind_table(idescs);
680                         return -1;
681                 }
682
683                 dlen = descs[i].len;
684                 *len += dlen;
685                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
686                                                 descs[i].addr, dlen,
687                                                 perm)))
688                         return -1;
689         }
690         *vec_idx = vec_id;
691
692         if (unlikely(!!idescs))
693                 free_ind_table(idescs);
694
695         return 0;
696 }
697
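/*
 * Packed ring equivalent of fill_vec_buf_split(): collect the buffers of one
 * available descriptor chain into buf_vec, reporting how many descriptors the
 * chain occupies and the buffer id to return in the used descriptor.
 */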
698 static __rte_always_inline int
699 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
700                                 uint16_t avail_idx, uint16_t *desc_count,
701                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
702                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
703 {
704         bool wrap_counter = vq->avail_wrap_counter;
705         struct vring_packed_desc *descs = vq->desc_packed;
706         uint16_t vec_id = *vec_idx;
707         uint64_t dlen;
708
709         if (avail_idx < vq->last_avail_idx)
710                 wrap_counter ^= 1;
711
712         /*
713          * Perform a load-acquire barrier in desc_is_avail to
714          * enforce the ordering between desc flags and desc
715          * content.
716          */
717         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
718                 return -1;
719
720         *desc_count = 0;
721         *len = 0;
722
723         while (1) {
724                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
725                         return -1;
726
727                 if (unlikely(*desc_count >= vq->size))
728                         return -1;
729
730                 *desc_count += 1;
731                 *buf_id = descs[avail_idx].id;
732
733                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
734                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
735                                                         &descs[avail_idx],
736                                                         &vec_id, buf_vec,
737                                                         len, perm) < 0))
738                                 return -1;
739                 } else {
740                         dlen = descs[avail_idx].len;
741                         *len += dlen;
742
743                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
744                                                         descs[avail_idx].addr,
745                                                         dlen,
746                                                         perm)))
747                                 return -1;
748                 }
749
750                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
751                         break;
752
753                 if (++avail_idx >= vq->size) {
754                         avail_idx -= vq->size;
755                         wrap_counter ^= 1;
756                 }
757         }
758
759         *vec_idx = vec_id;
760
761         return 0;
762 }
763
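/*
 * Bounce-copy a virtio-net header that does not fit in the first guest
 * buffer: spread it over consecutive buf_vec entries, logging each write.
 */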
764 static __rte_noinline void
765 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
766                 struct buf_vector *buf_vec,
767                 struct virtio_net_hdr_mrg_rxbuf *hdr)
768 {
769         uint64_t len;
770         uint64_t remain = dev->vhost_hlen;
771         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
772         uint64_t iova = buf_vec->buf_iova;
773
774         while (remain) {
775                 len = RTE_MIN(remain,
776                                 buf_vec->buf_len);
777                 dst = buf_vec->buf_addr;
778                 rte_memcpy((void *)(uintptr_t)dst,
779                                 (void *)(uintptr_t)src,
780                                 len);
781
782                 PRINT_PACKET(dev, (uintptr_t)dst,
783                                 (uint32_t)len, 0);
784                 vhost_log_cache_write_iova(dev, vq,
785                                 iova, len);
786
787                 remain -= len;
788                 iova += len;
789                 src += len;
790                 buf_vec++;
791         }
792 }
793
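/*
 * Async iovec iterator helpers: one rte_vhost_iov_iter is built per packet
 * from the shared async->iovec pool. async_iter_initialize() opens an
 * iterator, async_iter_add_iovec() appends one src/dst segment,
 * async_iter_finalize() commits the iterator and async_iter_cancel() rolls
 * back the segments added so far. async_iter_reset() clears both indexes so
 * the pools can be reused for the next burst.
 */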
794 static __rte_always_inline int
795 async_iter_initialize(struct vhost_async *async)
796 {
797         struct rte_vhost_iov_iter *iter;
798
799         if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
800                 VHOST_LOG_DATA(ERR, "no more async iovec available\n");
801                 return -1;
802         }
803
804         iter = async->iov_iter + async->iter_idx;
805         iter->iov = async->iovec + async->iovec_idx;
806         iter->nr_segs = 0;
807
808         return 0;
809 }
810
811 static __rte_always_inline int
812 async_iter_add_iovec(struct vhost_async *async, void *src, void *dst, size_t len)
813 {
814         struct rte_vhost_iov_iter *iter;
815         struct rte_vhost_iovec *iovec;
816
817         if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
818                 static bool vhost_max_async_vec_log;
819
820                 if (!vhost_max_async_vec_log) {
821                         VHOST_LOG_DATA(ERR, "no more async iovec available\n");
822                         vhost_max_async_vec_log = true;
823                 }
824
825                 return -1;
826         }
827
828         iter = async->iov_iter + async->iter_idx;
829         iovec = async->iovec + async->iovec_idx;
830
831         iovec->src_addr = src;
832         iovec->dst_addr = dst;
833         iovec->len = len;
834
835         iter->nr_segs++;
836         async->iovec_idx++;
837
838         return 0;
839 }
840
841 static __rte_always_inline void
842 async_iter_finalize(struct vhost_async *async)
843 {
844         async->iter_idx++;
845 }
846
847 static __rte_always_inline void
848 async_iter_cancel(struct vhost_async *async)
849 {
850         struct rte_vhost_iov_iter *iter;
851
852         iter = async->iov_iter + async->iter_idx;
853         async->iovec_idx -= iter->nr_segs;
854         iter->nr_segs = 0;
855         iter->iov = NULL;
856 }
857
858 static __rte_always_inline void
859 async_iter_reset(struct vhost_async *async)
860 {
861         async->iter_idx = 0;
862         async->iovec_idx = 0;
863 }
864
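/*
 * Typical calling pattern of the iterator helpers, as used by
 * async_mbuf_to_desc() below (illustrative sketch only, not a literal
 * excerpt):
 *
 *	if (async_iter_initialize(async))
 *		return -1;
 *	for (each guest buffer segment of the packet) {
 *		if (async_iter_add_iovec(async, src, dst, len))
 *			goto error;
 *	}
 *	async_iter_finalize(async);
 *	return 0;
 * error:
 *	async_iter_cancel(async);
 *	return -1;
 */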
865 static __rte_always_inline int
866 async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
867                 struct rte_mbuf *m, uint32_t mbuf_offset,
868                 uint64_t buf_iova, uint32_t cpy_len)
869 {
870         struct vhost_async *async = vq->async;
871         uint64_t mapped_len;
872         uint32_t buf_offset = 0;
873         void *hpa;
874
875         while (cpy_len) {
876                 hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
877                                 buf_iova + buf_offset, cpy_len, &mapped_len);
878                 if (unlikely(!hpa)) {
879                         VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n", dev->vid, __func__);
880                         return -1;
881                 }
882
883                 if (unlikely(async_iter_add_iovec(async,
884                                                 (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
885                                                         mbuf_offset),
886                                                 hpa, (size_t)mapped_len)))
887                         return -1;
888
889                 cpy_len -= (uint32_t)mapped_len;
890                 mbuf_offset += (uint32_t)mapped_len;
891                 buf_offset += (uint32_t)mapped_len;
892         }
893
894         return 0;
895 }
896
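/*
 * Synchronous enqueue: copy one mbuf chain into the guest buffers described
 * by buf_vec, writing the virtio-net header into the first buffer (or into a
 * bounce header split across buffers when it does not fit). Small copies go
 * through the batch copy list, large ones are copied immediately.
 */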
897 static __rte_always_inline int
898 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
899                             struct rte_mbuf *m, struct buf_vector *buf_vec,
900                             uint16_t nr_vec, uint16_t num_buffers)
901 {
902         uint32_t vec_idx = 0;
903         uint32_t mbuf_offset, mbuf_avail;
904         uint32_t buf_offset, buf_avail;
905         uint64_t buf_addr, buf_iova, buf_len;
906         uint32_t cpy_len;
907         uint64_t hdr_addr;
908         struct rte_mbuf *hdr_mbuf;
909         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
910         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
911         int error = 0;
912
913         if (unlikely(m == NULL)) {
914                 error = -1;
915                 goto out;
916         }
917
918         buf_addr = buf_vec[vec_idx].buf_addr;
919         buf_iova = buf_vec[vec_idx].buf_iova;
920         buf_len = buf_vec[vec_idx].buf_len;
921
922         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
923                 error = -1;
924                 goto out;
925         }
926
927         hdr_mbuf = m;
928         hdr_addr = buf_addr;
929         if (unlikely(buf_len < dev->vhost_hlen)) {
930                 memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
931                 hdr = &tmp_hdr;
932         } else
933                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
934
935         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
936                 dev->vid, num_buffers);
937
938         if (unlikely(buf_len < dev->vhost_hlen)) {
939                 buf_offset = dev->vhost_hlen - buf_len;
940                 vec_idx++;
941                 buf_addr = buf_vec[vec_idx].buf_addr;
942                 buf_iova = buf_vec[vec_idx].buf_iova;
943                 buf_len = buf_vec[vec_idx].buf_len;
944                 buf_avail = buf_len - buf_offset;
945         } else {
946                 buf_offset = dev->vhost_hlen;
947                 buf_avail = buf_len - dev->vhost_hlen;
948         }
949
950         mbuf_avail  = rte_pktmbuf_data_len(m);
951         mbuf_offset = 0;
952         while (mbuf_avail != 0 || m->next != NULL) {
953                 /* done with current buf, get the next one */
954                 if (buf_avail == 0) {
955                         vec_idx++;
956                         if (unlikely(vec_idx >= nr_vec)) {
957                                 error = -1;
958                                 goto out;
959                         }
960
961                         buf_addr = buf_vec[vec_idx].buf_addr;
962                         buf_iova = buf_vec[vec_idx].buf_iova;
963                         buf_len = buf_vec[vec_idx].buf_len;
964
965                         buf_offset = 0;
966                         buf_avail  = buf_len;
967                 }
968
969                 /* done with current mbuf, get the next one */
970                 if (mbuf_avail == 0) {
971                         m = m->next;
972
973                         mbuf_offset = 0;
974                         mbuf_avail  = rte_pktmbuf_data_len(m);
975                 }
976
977                 if (hdr_addr) {
978                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
979                         if (rxvq_is_mergeable(dev))
980                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
981                                                 num_buffers);
982
983                         if (unlikely(hdr == &tmp_hdr)) {
984                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
985                         } else {
986                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
987                                                 dev->vhost_hlen, 0);
988                                 vhost_log_cache_write_iova(dev, vq,
989                                                 buf_vec[0].buf_iova,
990                                                 dev->vhost_hlen);
991                         }
992
993                         hdr_addr = 0;
994                 }
995
996                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
997
998                 if (likely(cpy_len > MAX_BATCH_LEN ||
999                                         vq->batch_copy_nb_elems >= vq->size)) {
1000                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
1001                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
1002                                 cpy_len);
1003                         vhost_log_cache_write_iova(dev, vq,
1004                                                    buf_iova + buf_offset,
1005                                                    cpy_len);
1006                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
1007                                 cpy_len, 0);
1008                 } else {
1009                         batch_copy[vq->batch_copy_nb_elems].dst =
1010                                 (void *)((uintptr_t)(buf_addr + buf_offset));
1011                         batch_copy[vq->batch_copy_nb_elems].src =
1012                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
1013                         batch_copy[vq->batch_copy_nb_elems].log_addr =
1014                                 buf_iova + buf_offset;
1015                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
1016                         vq->batch_copy_nb_elems++;
1017                 }
1018
1019                 mbuf_avail  -= cpy_len;
1020                 mbuf_offset += cpy_len;
1021                 buf_avail  -= cpy_len;
1022                 buf_offset += cpy_len;
1023         }
1024
1025 out:
1026
1027         return error;
1028 }
1029
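/*
 * Async counterpart of copy_mbuf_to_desc(): instead of copying the payload,
 * record src/dst segments in the async iovec iterator so the copies can be
 * offloaded to a DMA engine. The virtio-net header is still written
 * synchronously.
 */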
1030 static __rte_always_inline int
1031 async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
1032                         struct rte_mbuf *m, struct buf_vector *buf_vec,
1033                         uint16_t nr_vec, uint16_t num_buffers)
1034 {
1035         struct vhost_async *async = vq->async;
1036         struct rte_mbuf *hdr_mbuf;
1037         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
1038         uint64_t buf_addr, buf_iova;
1039         uint64_t hdr_addr;
1040         uint32_t vec_idx = 0;
1041         uint32_t mbuf_offset, mbuf_avail;
1042         uint32_t buf_offset, buf_avail;
1043         uint32_t cpy_len, buf_len;
1044
1045         if (unlikely(m == NULL))
1046                 return -1;
1047
1048         buf_addr = buf_vec[vec_idx].buf_addr;
1049         buf_iova = buf_vec[vec_idx].buf_iova;
1050         buf_len = buf_vec[vec_idx].buf_len;
1051
1052         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
1053                 return -1;
1054
1055         hdr_mbuf = m;
1056         hdr_addr = buf_addr;
1057         if (unlikely(buf_len < dev->vhost_hlen)) {
1058                 memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
1059                 hdr = &tmp_hdr;
1060         } else
1061                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
1062
1063         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
1064                 dev->vid, num_buffers);
1065
1066         if (unlikely(buf_len < dev->vhost_hlen)) {
1067                 buf_offset = dev->vhost_hlen - buf_len;
1068                 vec_idx++;
1069                 buf_addr = buf_vec[vec_idx].buf_addr;
1070                 buf_iova = buf_vec[vec_idx].buf_iova;
1071                 buf_len = buf_vec[vec_idx].buf_len;
1072                 buf_avail = buf_len - buf_offset;
1073         } else {
1074                 buf_offset = dev->vhost_hlen;
1075                 buf_avail = buf_len - dev->vhost_hlen;
1076         }
1077
1078         mbuf_avail  = rte_pktmbuf_data_len(m);
1079         mbuf_offset = 0;
1080
1081         if (async_iter_initialize(async))
1082                 return -1;
1083
1084         while (mbuf_avail != 0 || m->next != NULL) {
1085                 /* done with current buf, get the next one */
1086                 if (buf_avail == 0) {
1087                         vec_idx++;
1088                         if (unlikely(vec_idx >= nr_vec))
1089                                 goto error;
1090
1091                         buf_addr = buf_vec[vec_idx].buf_addr;
1092                         buf_iova = buf_vec[vec_idx].buf_iova;
1093                         buf_len = buf_vec[vec_idx].buf_len;
1094
1095                         buf_offset = 0;
1096                         buf_avail = buf_len;
1097                 }
1098
1099                 /* done with current mbuf, get the next one */
1100                 if (mbuf_avail == 0) {
1101                         m = m->next;
1102
1103                         mbuf_offset = 0;
1104                         mbuf_avail = rte_pktmbuf_data_len(m);
1105                 }
1106
1107                 if (hdr_addr) {
1108                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
1109                         if (rxvq_is_mergeable(dev))
1110                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
1111                                                 num_buffers);
1112
1113                         if (unlikely(hdr == &tmp_hdr)) {
1114                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
1115                         } else {
1116                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
1117                                                 dev->vhost_hlen, 0);
1118                                 vhost_log_cache_write_iova(dev, vq,
1119                                                 buf_vec[0].buf_iova,
1120                                                 dev->vhost_hlen);
1121                         }
1122
1123                         hdr_addr = 0;
1124                 }
1125
1126                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1127
1128                 if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
1129                                         buf_iova + buf_offset, cpy_len) < 0) {
1130                         goto error;
1131                 }
1132
1133                 mbuf_avail  -= cpy_len;
1134                 mbuf_offset += cpy_len;
1135                 buf_avail  -= cpy_len;
1136                 buf_offset += cpy_len;
1137         }
1138
1139         async_iter_finalize(async);
1140
1141         return 0;
1142 error:
1143         async_iter_cancel(async);
1144
1145         return -1;
1146 }
1147
1148 static __rte_always_inline int
1149 vhost_enqueue_single_packed(struct virtio_net *dev,
1150                             struct vhost_virtqueue *vq,
1151                             struct rte_mbuf *pkt,
1152                             struct buf_vector *buf_vec,
1153                             uint16_t *nr_descs)
1154 {
1155         uint16_t nr_vec = 0;
1156         uint16_t avail_idx = vq->last_avail_idx;
1157         uint16_t max_tries, tries = 0;
1158         uint16_t buf_id = 0;
1159         uint32_t len = 0;
1160         uint16_t desc_count;
1161         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1162         uint16_t num_buffers = 0;
1163         uint32_t buffer_len[vq->size];
1164         uint16_t buffer_buf_id[vq->size];
1165         uint16_t buffer_desc_count[vq->size];
1166
1167         if (rxvq_is_mergeable(dev))
1168                 max_tries = vq->size - 1;
1169         else
1170                 max_tries = 1;
1171
1172         while (size > 0) {
1173                 /*
1174                  * If we have tried all available ring entries and still
1175                  * cannot get enough buffers, something abnormal has
1176                  * happened.
1177                  */
1178                 if (unlikely(++tries > max_tries))
1179                         return -1;
1180
1181                 if (unlikely(fill_vec_buf_packed(dev, vq,
1182                                                 avail_idx, &desc_count,
1183                                                 buf_vec, &nr_vec,
1184                                                 &buf_id, &len,
1185                                                 VHOST_ACCESS_RW) < 0))
1186                         return -1;
1187
1188                 len = RTE_MIN(len, size);
1189                 size -= len;
1190
1191                 buffer_len[num_buffers] = len;
1192                 buffer_buf_id[num_buffers] = buf_id;
1193                 buffer_desc_count[num_buffers] = desc_count;
1194                 num_buffers += 1;
1195
1196                 *nr_descs += desc_count;
1197                 avail_idx += desc_count;
1198                 if (avail_idx >= vq->size)
1199                         avail_idx -= vq->size;
1200         }
1201
1202         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
1203                 return -1;
1204
1205         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1206                                            buffer_desc_count, num_buffers);
1207
1208         return 0;
1209 }
1210
1211 static __rte_noinline uint32_t
1212 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1213         struct rte_mbuf **pkts, uint32_t count)
1214 {
1215         uint32_t pkt_idx = 0;
1216         uint16_t num_buffers;
1217         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1218         uint16_t avail_head;
1219
1220         /*
1221          * The ordering between avail index and
1222          * desc reads needs to be enforced.
1223          */
1224         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1225
1226         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1227
1228         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1229                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1230                 uint16_t nr_vec = 0;
1231
1232                 if (unlikely(reserve_avail_buf_split(dev, vq,
1233                                                 pkt_len, buf_vec, &num_buffers,
1234                                                 avail_head, &nr_vec) < 0)) {
1235                         VHOST_LOG_DATA(DEBUG,
1236                                 "(%d) failed to get enough desc from vring\n",
1237                                 dev->vid);
1238                         vq->shadow_used_idx -= num_buffers;
1239                         break;
1240                 }
1241
1242                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1243                         dev->vid, vq->last_avail_idx,
1244                         vq->last_avail_idx + num_buffers);
1245
1246                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1247                                                 buf_vec, nr_vec,
1248                                                 num_buffers) < 0) {
1249                         vq->shadow_used_idx -= num_buffers;
1250                         break;
1251                 }
1252
1253                 vq->last_avail_idx += num_buffers;
1254         }
1255
1256         do_data_copy_enqueue(dev, vq);
1257
1258         if (likely(vq->shadow_used_idx)) {
1259                 flush_shadow_used_ring_split(dev, vq);
1260                 vhost_vring_call_split(dev, vq);
1261         }
1262
1263         return pkt_idx;
1264 }
1265
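/*
 * Fast-path check for the packed ring: each of the next PACKED_BATCH_SIZE
 * descriptors must be available, large enough to hold the matching
 * single-segment mbuf plus the virtio-net header, and must translate to a
 * contiguous host address; otherwise fall back to the per-packet path.
 */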
1266 static __rte_always_inline int
1267 virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
1268                            struct vhost_virtqueue *vq,
1269                            struct rte_mbuf **pkts,
1270                            uint64_t *desc_addrs,
1271                            uint64_t *lens)
1272 {
1273         bool wrap_counter = vq->avail_wrap_counter;
1274         struct vring_packed_desc *descs = vq->desc_packed;
1275         uint16_t avail_idx = vq->last_avail_idx;
1276         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1277         uint16_t i;
1278
1279         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1280                 return -1;
1281
1282         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1283                 return -1;
1284
1285         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1286                 if (unlikely(pkts[i]->next != NULL))
1287                         return -1;
1288                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1289                                             wrap_counter)))
1290                         return -1;
1291         }
1292
1293         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1294                 lens[i] = descs[avail_idx + i].len;
1295
1296         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1297                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1298                         return -1;
1299         }
1300
1301         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1302                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1303                                                   descs[avail_idx + i].addr,
1304                                                   &lens[i],
1305                                                   VHOST_ACCESS_RW);
1306
1307         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1308                 if (unlikely(!desc_addrs[i]))
1309                         return -1;
1310                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1311                         return -1;
1312         }
1313
1314         return 0;
1315 }
1316
1317 static __rte_always_inline void
1318 virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
1319                            struct vhost_virtqueue *vq,
1320                            struct rte_mbuf **pkts,
1321                            uint64_t *desc_addrs,
1322                            uint64_t *lens)
1323 {
1324         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1325         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1326         struct vring_packed_desc *descs = vq->desc_packed;
1327         uint16_t avail_idx = vq->last_avail_idx;
1328         uint16_t ids[PACKED_BATCH_SIZE];
1329         uint16_t i;
1330
1331         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1332                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1333                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1334                                         (uintptr_t)desc_addrs[i];
1335                 lens[i] = pkts[i]->pkt_len +
1336                         sizeof(struct virtio_net_hdr_mrg_rxbuf);
1337         }
1338
1339         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1340                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1341
1342         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1343
1344         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1345                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1346                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1347                            pkts[i]->pkt_len);
1348         }
1349
1350         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1351                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1352                                            lens[i]);
1353
1354         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1355                 ids[i] = descs[avail_idx + i].id;
1356
1357         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1358 }
1359
1360 static __rte_always_inline int
1361 virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
1362                            struct vhost_virtqueue *vq,
1363                            struct rte_mbuf **pkts)
1364 {
1365         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1366         uint64_t lens[PACKED_BATCH_SIZE];
1367
1368         if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
1369                 return -1;
1370
1371         if (vq->shadow_used_idx) {
1372                 do_data_copy_enqueue(dev, vq);
1373                 vhost_flush_enqueue_shadow_packed(dev, vq);
1374         }
1375
1376         virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);
1377
1378         return 0;
1379 }
1380
1381 static __rte_always_inline int16_t
1382 virtio_dev_rx_single_packed(struct virtio_net *dev,
1383                             struct vhost_virtqueue *vq,
1384                             struct rte_mbuf *pkt)
1385 {
1386         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1387         uint16_t nr_descs = 0;
1388
1389         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1390                                                  &nr_descs) < 0)) {
1391                 VHOST_LOG_DATA(DEBUG,
1392                                 "(%d) failed to get enough desc from vring\n",
1393                                 dev->vid);
1394                 return -1;
1395         }
1396
1397         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1398                         dev->vid, vq->last_avail_idx,
1399                         vq->last_avail_idx + nr_descs);
1400
1401         vq_inc_last_avail_packed(vq, nr_descs);
1402
1403         return 0;
1404 }
1405
1406 static __rte_noinline uint32_t
1407 virtio_dev_rx_packed(struct virtio_net *dev,
1408                      struct vhost_virtqueue *__rte_restrict vq,
1409                      struct rte_mbuf **__rte_restrict pkts,
1410                      uint32_t count)
1411 {
1412         uint32_t pkt_idx = 0;
1413
1414         do {
1415                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1416
1417                 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
1418                         if (!virtio_dev_rx_sync_batch_packed(dev, vq,
1419                                                         &pkts[pkt_idx])) {
1420                                 pkt_idx += PACKED_BATCH_SIZE;
1421                                 continue;
1422                         }
1423                 }
1424
1425                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1426                         break;
1427                 pkt_idx++;
1428
1429         } while (pkt_idx < count);
1430
1431         if (vq->shadow_used_idx) {
1432                 do_data_copy_enqueue(dev, vq);
1433                 vhost_flush_enqueue_shadow_packed(dev, vq);
1434         }
1435
1436         if (pkt_idx)
1437                 vhost_vring_call_packed(dev, vq);
1438
1439         return pkt_idx;
1440 }
1441
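/*
 * Common synchronous enqueue entry point: validate the queue index, take the
 * access lock (and the IOTLB read lock when VIRTIO_F_IOMMU_PLATFORM is
 * negotiated), then dispatch to the packed or split ring implementation.
 */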
1442 static __rte_always_inline uint32_t
1443 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1444         struct rte_mbuf **pkts, uint32_t count)
1445 {
1446         struct vhost_virtqueue *vq;
1447         uint32_t nb_tx = 0;
1448
1449         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1450         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1451                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1452                         dev->vid, __func__, queue_id);
1453                 return 0;
1454         }
1455
1456         vq = dev->virtqueue[queue_id];
1457
1458         rte_spinlock_lock(&vq->access_lock);
1459
1460         if (unlikely(!vq->enabled))
1461                 goto out_access_unlock;
1462
1463         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1464                 vhost_user_iotlb_rd_lock(vq);
1465
1466         if (unlikely(!vq->access_ok))
1467                 if (unlikely(vring_translate(dev, vq) < 0))
1468                         goto out;
1469
1470         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1471         if (count == 0)
1472                 goto out;
1473
1474         if (vq_is_packed(dev))
1475                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1476         else
1477                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1478
1479 out:
1480         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1481                 vhost_user_iotlb_rd_unlock(vq);
1482
1483 out_access_unlock:
1484         rte_spinlock_unlock(&vq->access_lock);
1485
1486         return nb_tx;
1487 }
1488
1489 uint16_t
1490 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1491         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1492 {
1493         struct virtio_net *dev = get_device(vid);
1494
1495         if (!dev)
1496                 return 0;
1497
1498         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1499                 VHOST_LOG_DATA(ERR,
1500                         "(%d) %s: built-in vhost net backend is disabled.\n",
1501                         dev->vid, __func__);
1502                 return 0;
1503         }
1504
1505         return virtio_dev_rx(dev, queue_id, pkts, count);
1506 }
1507
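/*
 * Return the pkts_info ring index of the oldest in-flight async packet,
 * i.e. the slot from which completions are harvested.
 */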
1508 static __rte_always_inline uint16_t
1509 async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
1510 {
1511         struct vhost_async *async = vq->async;
1512
1513         if (async->pkts_idx >= async->pkts_inflight_n)
1514                 return async->pkts_idx - async->pkts_inflight_n;
1515         else
1516                 return vq->size - async->pkts_inflight_n + async->pkts_idx;
1517 }
1518
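/*
 * Copy 'count' shadow used elements into the async descriptor ring,
 * splitting the copy in two when the destination index wraps around.
 */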
1519 static __rte_always_inline void
1520 store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,
1521                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1522 {
1523         size_t elem_size = sizeof(struct vring_used_elem);
1524
1525         if (d_idx + count <= ring_size) {
1526                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1527         } else {
1528                 uint16_t size = ring_size - d_idx;
1529
1530                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1531                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1532         }
1533 }
1534
1535 static __rte_always_inline void
1536 store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
1537                 struct vring_used_elem_packed *d_ring,
1538                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1539 {
1540         size_t elem_size = sizeof(struct vring_used_elem_packed);
1541
1542         if (d_idx + count <= ring_size) {
1543                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1544         } else {
1545                 uint16_t size = ring_size - d_idx;
1546
1547                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1548                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1549         }
1550 }
1551
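/*
 * Async enqueue path for split rings: reserve descriptors for each mbuf,
 * build the copy jobs, submit them through the transfer_data callback and
 * roll back the descriptors of packets that could not be submitted.
 */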
1552 static __rte_noinline uint32_t
1553 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
1554         struct vhost_virtqueue *vq, uint16_t queue_id,
1555         struct rte_mbuf **pkts, uint32_t count)
1556 {
1557         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1558         uint32_t pkt_idx = 0;
1559         uint16_t num_buffers;
1560         uint16_t avail_head;
1561
1562         struct vhost_async *async = vq->async;
1563         struct async_inflight_info *pkts_info = async->pkts_info;
1564         uint32_t pkt_err = 0;
1565         int32_t n_xfer;
1566         uint16_t slot_idx = 0;
1567
1568         /*
1569          * The ordering between avail index and desc reads needs to be enforced.
1570          */
1571         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1572
1573         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1574
1575         async_iter_reset(async);
1576
1577         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1578                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1579                 uint16_t nr_vec = 0;
1580
1581                 if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
1582                                                 &num_buffers, avail_head, &nr_vec) < 0)) {
1583                         VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n",
1584                                         dev->vid);
1585                         vq->shadow_used_idx -= num_buffers;
1586                         break;
1587                 }
1588
1589                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1590                         dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
1591
1592                 if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers) < 0) {
1593                         vq->shadow_used_idx -= num_buffers;
1594                         break;
1595                 }
1596
1597                 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
1598                 pkts_info[slot_idx].descs = num_buffers;
1599                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1600
1601                 vq->last_avail_idx += num_buffers;
1602         }
1603
1604         if (unlikely(pkt_idx == 0))
1605                 return 0;
1606
1607         n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
1608         if (unlikely(n_xfer < 0)) {
1609                 VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
1610                                 dev->vid, __func__, queue_id);
1611                 n_xfer = 0;
1612         }
1613
1614         pkt_err = pkt_idx - n_xfer;
1615         if (unlikely(pkt_err)) {
1616                 uint16_t num_descs = 0;
1617
1618                 /* update number of completed packets */
1619                 pkt_idx = n_xfer;
1620
1621                 /* calculate the sum of descriptors to revert */
1622                 while (pkt_err-- > 0) {
1623                         num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1624                         slot_idx--;
1625                 }
1626
1627                 /* recover shadow used ring and available ring */
1628                 vq->shadow_used_idx -= num_descs;
1629                 vq->last_avail_idx -= num_descs;
1630         }
1631
1632         /* keep used descriptors */
1633         if (likely(vq->shadow_used_idx)) {
1634                 uint16_t to = async->desc_idx_split & (vq->size - 1);
1635
1636                 store_dma_desc_info_split(vq->shadow_used_split,
1637                                 async->descs_split, vq->size, 0, to,
1638                                 vq->shadow_used_idx);
1639
1640                 async->desc_idx_split += vq->shadow_used_idx;
1641
1642                 async->pkts_idx += pkt_idx;
1643                 if (async->pkts_idx >= vq->size)
1644                         async->pkts_idx -= vq->size;
1645
1646                 async->pkts_inflight_n += pkt_idx;
1647                 vq->shadow_used_idx = 0;
1648         }
1649
1650         return pkt_idx;
1651 }
1652
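/*
 * Write back the id/len of completed buffers into the packed descriptor
 * ring, then update the flags, leaving the head descriptor for last so
 * the guest never sees a partially completed chain.
 */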
1653 static __rte_always_inline void
1654 vhost_update_used_packed(struct vhost_virtqueue *vq,
1655                         struct vring_used_elem_packed *shadow_ring,
1656                         uint16_t count)
1657 {
1658         int i;
1659         uint16_t used_idx = vq->last_used_idx;
1660         uint16_t head_idx = vq->last_used_idx;
1661         uint16_t head_flags = 0;
1662
1663         if (count == 0)
1664                 return;
1665
1666         /* Split loop in two to save memory barriers */
1667         for (i = 0; i < count; i++) {
1668                 vq->desc_packed[used_idx].id = shadow_ring[i].id;
1669                 vq->desc_packed[used_idx].len = shadow_ring[i].len;
1670
1671                 used_idx += shadow_ring[i].count;
1672                 if (used_idx >= vq->size)
1673                         used_idx -= vq->size;
1674         }
1675
1676         /* The ordering for storing desc flags needs to be enforced. */
1677         rte_atomic_thread_fence(__ATOMIC_RELEASE);
1678
1679         for (i = 0; i < count; i++) {
1680                 uint16_t flags;
1681
1682                 if (vq->shadow_used_packed[i].len)
1683                         flags = VRING_DESC_F_WRITE;
1684                 else
1685                         flags = 0;
1686
1687                 if (vq->used_wrap_counter) {
1688                         flags |= VRING_DESC_F_USED;
1689                         flags |= VRING_DESC_F_AVAIL;
1690                 } else {
1691                         flags &= ~VRING_DESC_F_USED;
1692                         flags &= ~VRING_DESC_F_AVAIL;
1693                 }
1694
1695                 if (i > 0) {
1696                         vq->desc_packed[vq->last_used_idx].flags = flags;
1697                 } else {
1698                         head_idx = vq->last_used_idx;
1699                         head_flags = flags;
1700                 }
1701
1702                 vq_inc_last_used_packed(vq, shadow_ring[i].count);
1703         }
1704
1705         vq->desc_packed[head_idx].flags = head_flags;
1706 }
1707
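/*
 * Reserve enough packed descriptors to hold the packet (possibly spanning
 * several buffers when mergeable Rx is negotiated), build the async copy
 * jobs and record the buffers in the shadow used ring.
 */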
1708 static __rte_always_inline int
1709 vhost_enqueue_async_packed(struct virtio_net *dev,
1710                             struct vhost_virtqueue *vq,
1711                             struct rte_mbuf *pkt,
1712                             struct buf_vector *buf_vec,
1713                             uint16_t *nr_descs,
1714                             uint16_t *nr_buffers)
1715 {
1716         uint16_t nr_vec = 0;
1717         uint16_t avail_idx = vq->last_avail_idx;
1718         uint16_t max_tries, tries = 0;
1719         uint16_t buf_id = 0;
1720         uint32_t len = 0;
1721         uint16_t desc_count = 0;
1722         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1723         uint32_t buffer_len[vq->size];
1724         uint16_t buffer_buf_id[vq->size];
1725         uint16_t buffer_desc_count[vq->size];
1726
1727         if (rxvq_is_mergeable(dev))
1728                 max_tries = vq->size - 1;
1729         else
1730                 max_tries = 1;
1731
1732         while (size > 0) {
1733                 /*
1734                  * If we have tried all available ring items and still
1735                  * cannot get enough buffers, something abnormal
1736                  * happened.
1737                  */
1738                 if (unlikely(++tries > max_tries))
1739                         return -1;
1740
1741                 if (unlikely(fill_vec_buf_packed(dev, vq,
1742                                                 avail_idx, &desc_count,
1743                                                 buf_vec, &nr_vec,
1744                                                 &buf_id, &len,
1745                                                 VHOST_ACCESS_RW) < 0))
1746                         return -1;
1747
1748                 len = RTE_MIN(len, size);
1749                 size -= len;
1750
1751                 buffer_len[*nr_buffers] = len;
1752                 buffer_buf_id[*nr_buffers] = buf_id;
1753                 buffer_desc_count[*nr_buffers] = desc_count;
1754                 *nr_buffers += 1;
1755                 *nr_descs += desc_count;
1756                 avail_idx += desc_count;
1757                 if (avail_idx >= vq->size)
1758                         avail_idx -= vq->size;
1759         }
1760
1761         if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
1762                                         *nr_buffers) < 0))
1763                 return -1;
1764
1765         vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
1766
1767         return 0;
1768 }
1769
1770 static __rte_always_inline int16_t
1771 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1772                             struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
1773 {
1774         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1775
1776         if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
1777                                         nr_descs, nr_buffers) < 0)) {
1778                 VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
1779                 return -1;
1780         }
1781
1782         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1783                         dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
1784
1785         return 0;
1786 }
1787
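/*
 * Roll back the avail index and the shadow used ring for the last
 * 'nr_err' packets whose async copy submission failed.
 */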
1788 static __rte_always_inline void
1789 dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
1790                         uint32_t nr_err, uint32_t *pkt_idx)
1791 {
1792         uint16_t descs_err = 0;
1793         uint16_t buffers_err = 0;
1794         struct async_inflight_info *pkts_info = vq->async->pkts_info;
1795
1796         *pkt_idx -= nr_err;
1797         /* calculate the sum of buffers and descs of DMA-error packets. */
1798         while (nr_err-- > 0) {
1799                 descs_err += pkts_info[slot_idx % vq->size].descs;
1800                 buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
1801                 slot_idx--;
1802         }
1803
1804         if (vq->last_avail_idx >= descs_err) {
1805                 vq->last_avail_idx -= descs_err;
1806         } else {
1807                 vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
1808                 vq->avail_wrap_counter ^= 1;
1809         }
1810
1811         vq->shadow_used_idx -= buffers_err;
1812 }
1813
1814 static __rte_noinline uint32_t
1815 virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
1816         struct vhost_virtqueue *vq, uint16_t queue_id,
1817         struct rte_mbuf **pkts, uint32_t count)
1818 {
1819         uint32_t pkt_idx = 0;
1820         uint32_t remained = count;
1821         int32_t n_xfer;
1822         uint16_t num_buffers;
1823         uint16_t num_descs;
1824
1825         struct vhost_async *async = vq->async;
1826         struct async_inflight_info *pkts_info = async->pkts_info;
1827         uint32_t pkt_err = 0;
1828         uint16_t slot_idx = 0;
1829
1830         do {
1831                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1832
1833                 num_buffers = 0;
1834                 num_descs = 0;
1835                 if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
1836                                                 &num_descs, &num_buffers) < 0))
1837                         break;
1838
1839                 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
1840
1841                 pkts_info[slot_idx].descs = num_descs;
1842                 pkts_info[slot_idx].nr_buffers = num_buffers;
1843                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1844
1845                 pkt_idx++;
1846                 remained--;
1847                 vq_inc_last_avail_packed(vq, num_descs);
1848         } while (pkt_idx < count);
1849
1850         if (unlikely(pkt_idx == 0))
1851                 return 0;
1852
1853         n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
1854         if (unlikely(n_xfer < 0)) {
1855                 VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
1856                                 dev->vid, __func__, queue_id);
1857                 n_xfer = 0;
1858         }
1859
1860         pkt_err = pkt_idx - n_xfer;
1861
1862         async_iter_reset(async);
1863
1864         if (unlikely(pkt_err))
1865                 dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
1866
1867         if (likely(vq->shadow_used_idx)) {
1868                 /* keep used descriptors. */
1869                 store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
1870                                         vq->size, 0, async->buffer_idx_packed,
1871                                         vq->shadow_used_idx);
1872
1873                 async->buffer_idx_packed += vq->shadow_used_idx;
1874                 if (async->buffer_idx_packed >= vq->size)
1875                         async->buffer_idx_packed -= vq->size;
1876
1877                 async->pkts_idx += pkt_idx;
1878                 if (async->pkts_idx >= vq->size)
1879                         async->pkts_idx -= vq->size;
1880
1881                 vq->shadow_used_idx = 0;
1882                 async->pkts_inflight_n += pkt_idx;
1883         }
1884
1885         return pkt_idx;
1886 }
1887
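/*
 * Copy 'n_descs' completed descriptors from the async descriptor ring
 * back into the used ring, taking care of wrap-around on both rings.
 */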
1888 static __rte_always_inline void
1889 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
1890 {
1891         struct vhost_async *async = vq->async;
1892         uint16_t nr_left = n_descs;
1893         uint16_t nr_copy;
1894         uint16_t to, from;
1895
1896         do {
1897                 from = async->last_desc_idx_split & (vq->size - 1);
1898                 nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
1899                 to = vq->last_used_idx & (vq->size - 1);
1900
1901                 if (to + nr_copy <= vq->size) {
1902                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1903                                         nr_copy * sizeof(struct vring_used_elem));
1904                 } else {
1905                         uint16_t size = vq->size - to;
1906
1907                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1908                                         size * sizeof(struct vring_used_elem));
1909                         rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
1910                                         (nr_copy - size) * sizeof(struct vring_used_elem));
1911                 }
1912
1913                 async->last_desc_idx_split += nr_copy;
1914                 vq->last_used_idx += nr_copy;
1915                 nr_left -= nr_copy;
1916         } while (nr_left > 0);
1917 }
1918
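/*
 * Flush 'n_buffers' completed buffers from the async shadow ring into the
 * packed descriptor ring, wrapping around the ring as needed.
 */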
1919 static __rte_always_inline void
1920 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
1921                                 uint16_t n_buffers)
1922 {
1923         struct vhost_async *async = vq->async;
1924         uint16_t nr_left = n_buffers;
1925         uint16_t from, to;
1926
1927         do {
1928                 from = async->last_buffer_idx_packed;
1929                 to = (from + nr_left) % vq->size;
1930                 if (to > from) {
1931                         vhost_update_used_packed(vq, async->buffers_packed + from, to - from);
1932                         async->last_buffer_idx_packed += nr_left;
1933                         nr_left = 0;
1934                 } else {
1935                         vhost_update_used_packed(vq, async->buffers_packed + from,
1936                                 vq->size - from);
1937                         async->last_buffer_idx_packed = 0;
1938                         nr_left -= vq->size - from;
1939                 }
1940         } while (nr_left > 0);
1941 }
1942
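/*
 * Collect packets whose async copies have completed, hand their mbufs
 * back to the caller and write the corresponding descriptors back to the
 * guest (or only account for them when the ring is not accessible).
 */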
1943 static __rte_always_inline uint16_t
1944 vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
1945                 struct rte_mbuf **pkts, uint16_t count)
1946 {
1947         struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
1948         struct vhost_async *async = vq->async;
1949         struct async_inflight_info *pkts_info = async->pkts_info;
1950         int32_t n_cpl;
1951         uint16_t n_descs = 0, n_buffers = 0;
1952         uint16_t start_idx, from, i;
1953
1954         n_cpl = async->ops.check_completed_copies(dev->vid, queue_id, 0, count);
1955         if (unlikely(n_cpl < 0)) {
1956                 VHOST_LOG_DATA(ERR, "(%d) %s: failed to check completed copies for queue id %d.\n",
1957                                 dev->vid, __func__, queue_id);
1958                 return 0;
1959         }
1960
1961         if (n_cpl == 0)
1962                 return 0;
1963
1964         start_idx = async_get_first_inflight_pkt_idx(vq);
1965
1966         for (i = 0; i < n_cpl; i++) {
1967                 from = (start_idx + i) % vq->size;
1968                 /* Only used with packed ring */
1969                 n_buffers += pkts_info[from].nr_buffers;
1970                 /* Only used with split ring */
1971                 n_descs += pkts_info[from].descs;
1972                 pkts[i] = pkts_info[from].mbuf;
1973         }
1974
1975         async->pkts_inflight_n -= n_cpl;
1976
1977         if (likely(vq->enabled && vq->access_ok)) {
1978                 if (vq_is_packed(dev)) {
1979                         write_back_completed_descs_packed(vq, n_buffers);
1980                         vhost_vring_call_packed(dev, vq);
1981                 } else {
1982                         write_back_completed_descs_split(vq, n_descs);
1983                         __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
1984                         vhost_vring_call_split(dev, vq);
1985                 }
1986         } else {
1987                 if (vq_is_packed(dev)) {
1988                         async->last_buffer_idx_packed += n_buffers;
1989                         if (async->last_buffer_idx_packed >= vq->size)
1990                                 async->last_buffer_idx_packed -= vq->size;
1991                 } else {
1992                         async->last_desc_idx_split += n_descs;
1993                 }
1994         }
1995
1996         return n_cpl;
1997 }
1998
1999 uint16_t
2000 rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
2001                 struct rte_mbuf **pkts, uint16_t count)
2002 {
2003         struct virtio_net *dev = get_device(vid);
2004         struct vhost_virtqueue *vq;
2005         uint16_t n_pkts_cpl = 0;
2006
2007         if (unlikely(!dev))
2008                 return 0;
2009
2010         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2011         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2012                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2013                         dev->vid, __func__, queue_id);
2014                 return 0;
2015         }
2016
2017         vq = dev->virtqueue[queue_id];
2018
2019         if (unlikely(!vq->async)) {
2020                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
2021                         dev->vid, __func__, queue_id);
2022                 return 0;
2023         }
2024
2025         rte_spinlock_lock(&vq->access_lock);
2026
2027         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
2028
2029         rte_spinlock_unlock(&vq->access_lock);
2030
2031         return n_pkts_cpl;
2032 }
2033
2034 uint16_t
2035 rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
2036                 struct rte_mbuf **pkts, uint16_t count)
2037 {
2038         struct virtio_net *dev = get_device(vid);
2039         struct vhost_virtqueue *vq;
2040         uint16_t n_pkts_cpl = 0;
2041
2042         if (!dev)
2043                 return 0;
2044
2045         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2046         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2047                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2048                         dev->vid, __func__, queue_id);
2049                 return 0;
2050         }
2051
2052         vq = dev->virtqueue[queue_id];
2053
2054         if (unlikely(!vq->async)) {
2055                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
2056                         dev->vid, __func__, queue_id);
2057                 return 0;
2058         }
2059
2060         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
2061
2062         return n_pkts_cpl;
2063 }
2064
2065 static __rte_always_inline uint32_t
2066 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
2067         struct rte_mbuf **pkts, uint32_t count)
2068 {
2069         struct vhost_virtqueue *vq;
2070         uint32_t nb_tx = 0;
2071
2072         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2073         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2074                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2075                         dev->vid, __func__, queue_id);
2076                 return 0;
2077         }
2078
2079         vq = dev->virtqueue[queue_id];
2080
2081         rte_spinlock_lock(&vq->access_lock);
2082
2083         if (unlikely(!vq->enabled || !vq->async))
2084                 goto out_access_unlock;
2085
2086         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2087                 vhost_user_iotlb_rd_lock(vq);
2088
2089         if (unlikely(!vq->access_ok))
2090                 if (unlikely(vring_translate(dev, vq) < 0))
2091                         goto out;
2092
2093         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
2094         if (count == 0)
2095                 goto out;
2096
2097         if (vq_is_packed(dev))
2098                 nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
2099                                 pkts, count);
2100         else
2101                 nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
2102                                 pkts, count);
2103
2104 out:
2105         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2106                 vhost_user_iotlb_rd_unlock(vq);
2107
2108 out_access_unlock:
2109         rte_spinlock_unlock(&vq->access_lock);
2110
2111         return nb_tx;
2112 }
2113
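/*
 * Illustrative usage sketch (not part of the library itself): an
 * application typically submits a burst with
 * rte_vhost_submit_enqueue_burst() and later reclaims the mbufs whose
 * copies have completed with rte_vhost_poll_enqueue_completed(), e.g.:
 *
 *	n_enq = rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, nb_pkts);
 *	...
 *	n_cpl = rte_vhost_poll_enqueue_completed(vid, queue_id, cpl_pkts, nb_cpl);
 *	rte_pktmbuf_free_bulk(cpl_pkts, n_cpl);
 *
 * vid, queue_id, pkts, nb_pkts, cpl_pkts and nb_cpl are placeholders for
 * the caller's own variables.
 */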
2114 uint16_t
2115 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
2116                 struct rte_mbuf **pkts, uint16_t count)
2117 {
2118         struct virtio_net *dev = get_device(vid);
2119
2120         if (!dev)
2121                 return 0;
2122
2123         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2124                 VHOST_LOG_DATA(ERR,
2125                         "(%d) %s: built-in vhost net backend is disabled.\n",
2126                         dev->vid, __func__);
2127                 return 0;
2128         }
2129
2130         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
2131 }
2132
2133 static inline bool
2134 virtio_net_with_host_offload(struct virtio_net *dev)
2135 {
2136         if (dev->features &
2137                         ((1ULL << VIRTIO_NET_F_CSUM) |
2138                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
2139                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2140                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
2141                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
2142                 return true;
2143
2144         return false;
2145 }
2146
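/*
 * Parse and validate the Ethernet, L3 and L4 headers of an mbuf, filling
 * in m->l2_len, m->l3_len and the Tx ol_flags; the L4 protocol is returned
 * through *l4_proto, or -EINVAL if any header is truncated or unsupported.
 */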
2147 static int
2148 parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
2149 {
2150         struct rte_ipv4_hdr *ipv4_hdr;
2151         struct rte_ipv6_hdr *ipv6_hdr;
2152         struct rte_ether_hdr *eth_hdr;
2153         uint16_t ethertype;
2154         uint16_t data_len = rte_pktmbuf_data_len(m);
2155
2156         if (data_len < sizeof(struct rte_ether_hdr))
2157                 return -EINVAL;
2158
2159         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
2160
2161         m->l2_len = sizeof(struct rte_ether_hdr);
2162         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
2163
2164         if (ethertype == RTE_ETHER_TYPE_VLAN) {
2165                 if (data_len < sizeof(struct rte_ether_hdr) +
2166                                 sizeof(struct rte_vlan_hdr))
2167                         goto error;
2168
2169                 struct rte_vlan_hdr *vlan_hdr =
2170                         (struct rte_vlan_hdr *)(eth_hdr + 1);
2171
2172                 m->l2_len += sizeof(struct rte_vlan_hdr);
2173                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
2174         }
2175
2176         switch (ethertype) {
2177         case RTE_ETHER_TYPE_IPV4:
2178                 if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
2179                         goto error;
2180                 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2181                                 m->l2_len);
2182                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
2183                 if (data_len < m->l2_len + m->l3_len)
2184                         goto error;
2185                 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
2186                 *l4_proto = ipv4_hdr->next_proto_id;
2187                 break;
2188         case RTE_ETHER_TYPE_IPV6:
2189                 if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
2190                         goto error;
2191                 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
2192                                 m->l2_len);
2193                 m->l3_len = sizeof(struct rte_ipv6_hdr);
2194                 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
2195                 *l4_proto = ipv6_hdr->proto;
2196                 break;
2197         default:
2198                 /* a valid L3 header is needed for further L4 parsing */
2199                 goto error;
2200         }
2201
2202         /* both CSUM and GSO need a valid L4 header */
2203         switch (*l4_proto) {
2204         case IPPROTO_TCP:
2205                 if (data_len < m->l2_len + m->l3_len +
2206                                 sizeof(struct rte_tcp_hdr))
2207                         goto error;
2208                 break;
2209         case IPPROTO_UDP:
2210                 if (data_len < m->l2_len + m->l3_len +
2211                                 sizeof(struct rte_udp_hdr))
2212                         goto error;
2213                 break;
2214         case IPPROTO_SCTP:
2215                 if (data_len < m->l2_len + m->l3_len +
2216                                 sizeof(struct rte_sctp_hdr))
2217                         goto error;
2218                 break;
2219         default:
2220                 goto error;
2221         }
2222
2223         return 0;
2224
2225 error:
2226         m->l2_len = 0;
2227         m->l3_len = 0;
2228         m->ol_flags = 0;
2229         return -EINVAL;
2230 }
2231
2232 static __rte_always_inline void
2233 vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
2234 {
2235         uint8_t l4_proto = 0;
2236         struct rte_tcp_hdr *tcp_hdr = NULL;
2237         uint16_t tcp_len;
2238         uint16_t data_len = rte_pktmbuf_data_len(m);
2239
2240         if (parse_headers(m, &l4_proto) < 0)
2241                 return;
2242
2243         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2244                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
2245                         switch (hdr->csum_offset) {
2246                         case (offsetof(struct rte_tcp_hdr, cksum)):
2247                                 if (l4_proto != IPPROTO_TCP)
2248                                         goto error;
2249                                 m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
2250                                 break;
2251                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
2252                                 if (l4_proto != IPPROTO_UDP)
2253                                         goto error;
2254                                 m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
2255                                 break;
2256                         case (offsetof(struct rte_sctp_hdr, cksum)):
2257                                 if (l4_proto != IPPROTO_SCTP)
2258                                         goto error;
2259                                 m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
2260                                 break;
2261                         default:
2262                                 goto error;
2263                         }
2264                 } else {
2265                         goto error;
2266                 }
2267         }
2268
2269         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2270                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2271                 case VIRTIO_NET_HDR_GSO_TCPV4:
2272                 case VIRTIO_NET_HDR_GSO_TCPV6:
2273                         if (l4_proto != IPPROTO_TCP)
2274                                 goto error;
2275                         tcp_hdr = rte_pktmbuf_mtod_offset(m,
2276                                         struct rte_tcp_hdr *,
2277                                         m->l2_len + m->l3_len);
2278                         tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
2279                         if (data_len < m->l2_len + m->l3_len + tcp_len)
2280                                 goto error;
2281                         m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
2282                         m->tso_segsz = hdr->gso_size;
2283                         m->l4_len = tcp_len;
2284                         break;
2285                 case VIRTIO_NET_HDR_GSO_UDP:
2286                         if (l4_proto != IPPROTO_UDP)
2287                                 goto error;
2288                         m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
2289                         m->tso_segsz = hdr->gso_size;
2290                         m->l4_len = sizeof(struct rte_udp_hdr);
2291                         break;
2292                 default:
2293                         VHOST_LOG_DATA(WARNING,
2294                                 "unsupported gso type %u.\n", hdr->gso_type);
2295                         goto error;
2296                 }
2297         }
2298         return;
2299
2300 error:
2301         m->l2_len = 0;
2302         m->l3_len = 0;
2303         m->ol_flags = 0;
2304 }
2305
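/*
 * Translate the virtio-net header into mbuf offload metadata. Legacy mode
 * keeps the historical behaviour of reporting Tx offload flags, while the
 * compliant mode reports Rx flags as described by the Virtio specification.
 */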
2306 static __rte_always_inline void
2307 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,
2308         bool legacy_ol_flags)
2309 {
2310         struct rte_net_hdr_lens hdr_lens;
2311         int l4_supported = 0;
2312         uint32_t ptype;
2313
2314         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
2315                 return;
2316
2317         if (legacy_ol_flags) {
2318                 vhost_dequeue_offload_legacy(hdr, m);
2319                 return;
2320         }
2321
2322         m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
2323
2324         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
2325         m->packet_type = ptype;
2326         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
2327             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
2328             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
2329                 l4_supported = 1;
2330
2331         /* According to Virtio 1.1 spec, the device only needs to look at
2332          * VIRTIO_NET_HDR_F_NEEDS_CSUM in the packet transmission path.
2333          * This differs from the path for processing incoming packets, where
2334          * the driver can rely on the VIRTIO_NET_HDR_F_DATA_VALID flag set by
2335          * the device.
2336          *
2337          * 5.1.6.2.1 Driver Requirements: Packet Transmission
2338          * The driver MUST NOT set the VIRTIO_NET_HDR_F_DATA_VALID and
2339          * VIRTIO_NET_HDR_F_RSC_INFO bits in flags.
2340          *
2341          * 5.1.6.2.2 Device Requirements: Packet Transmission
2342          * The device MUST ignore flag bits that it does not recognize.
2343          */
2344         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2345                 uint32_t hdrlen;
2346
2347                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
2348                 if (hdr->csum_start <= hdrlen && l4_supported != 0) {
2349                         m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
2350                 } else {
2351                         /* Unknown proto or tunnel, do sw cksum. We can assume
2352                          * the cksum field is in the first segment since the
2353                          * buffers we provided to the host are large enough.
2354                          * In case of SCTP, this will be wrong since it's a CRC
2355                          * but there's nothing we can do.
2356                          */
2357                         uint16_t csum = 0, off;
2358
2359                         if (rte_raw_cksum_mbuf(m, hdr->csum_start,
2360                                         rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0)
2361                                 return;
2362                         if (likely(csum != 0xffff))
2363                                 csum = ~csum;
2364                         off = hdr->csum_offset + hdr->csum_start;
2365                         if (rte_pktmbuf_data_len(m) >= off + 1)
2366                                 *rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
2367                 }
2368         }
2369
2370         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2371                 if (hdr->gso_size == 0)
2372                         return;
2373
2374                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2375                 case VIRTIO_NET_HDR_GSO_TCPV4:
2376                 case VIRTIO_NET_HDR_GSO_TCPV6:
2377                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
2378                                 break;
2379                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2380                         m->tso_segsz = hdr->gso_size;
2381                         break;
2382                 case VIRTIO_NET_HDR_GSO_UDP:
2383                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
2384                                 break;
2385                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2386                         m->tso_segsz = hdr->gso_size;
2387                         break;
2388                 default:
2389                         break;
2390                 }
2391         }
2392 }
2393
2394 static __rte_noinline void
2395 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
2396                 struct buf_vector *buf_vec)
2397 {
2398         uint64_t len;
2399         uint64_t remain = sizeof(struct virtio_net_hdr);
2400         uint64_t src;
2401         uint64_t dst = (uint64_t)(uintptr_t)hdr;
2402
2403         while (remain) {
2404                 len = RTE_MIN(remain, buf_vec->buf_len);
2405                 src = buf_vec->buf_addr;
2406                 rte_memcpy((void *)(uintptr_t)dst,
2407                                 (void *)(uintptr_t)src, len);
2408
2409                 remain -= len;
2410                 dst += len;
2411                 buf_vec++;
2412         }
2413 }
2414
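/*
 * Copy a descriptor chain into an mbuf chain, allocating additional mbufs
 * from mbuf_pool when the packet does not fit in a single one, and apply
 * the dequeue offloads carried by the virtio-net header.
 */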
2415 static __rte_always_inline int
2416 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
2417                   struct buf_vector *buf_vec, uint16_t nr_vec,
2418                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
2419                   bool legacy_ol_flags)
2420 {
2421         uint32_t buf_avail, buf_offset;
2422         uint64_t buf_addr, buf_len;
2423         uint32_t mbuf_avail, mbuf_offset;
2424         uint32_t cpy_len;
2425         struct rte_mbuf *cur = m, *prev = m;
2426         struct virtio_net_hdr tmp_hdr;
2427         struct virtio_net_hdr *hdr = NULL;
2428         /* A counter to avoid a dead loop on the desc chain */
2429         uint16_t vec_idx = 0;
2430         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
2431         int error = 0;
2432
2433         buf_addr = buf_vec[vec_idx].buf_addr;
2434         buf_len = buf_vec[vec_idx].buf_len;
2435
2436         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
2437                 error = -1;
2438                 goto out;
2439         }
2440
2441         if (virtio_net_with_host_offload(dev)) {
2442                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
2443                         /*
2444                          * No luck, the virtio-net header doesn't fit
2445                          * in a contiguous virtual area.
2446                          */
2447                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
2448                         hdr = &tmp_hdr;
2449                 } else {
2450                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
2451                 }
2452         }
2453
2454         /*
2455          * A virtio driver normally uses at least 2 desc buffers
2456          * for Tx: the first for storing the header, and the others
2457          * for storing the data.
2458          */
2459         if (unlikely(buf_len < dev->vhost_hlen)) {
2460                 buf_offset = dev->vhost_hlen - buf_len;
2461                 vec_idx++;
2462                 buf_addr = buf_vec[vec_idx].buf_addr;
2463                 buf_len = buf_vec[vec_idx].buf_len;
2464                 buf_avail  = buf_len - buf_offset;
2465         } else if (buf_len == dev->vhost_hlen) {
2466                 if (unlikely(++vec_idx >= nr_vec))
2467                         goto out;
2468                 buf_addr = buf_vec[vec_idx].buf_addr;
2469                 buf_len = buf_vec[vec_idx].buf_len;
2470
2471                 buf_offset = 0;
2472                 buf_avail = buf_len;
2473         } else {
2474                 buf_offset = dev->vhost_hlen;
2475                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
2476         }
2477
2478         PRINT_PACKET(dev,
2479                         (uintptr_t)(buf_addr + buf_offset),
2480                         (uint32_t)buf_avail, 0);
2481
2482         mbuf_offset = 0;
2483         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
2484         while (1) {
2485                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
2486
2487                 if (likely(cpy_len > MAX_BATCH_LEN ||
2488                                         vq->batch_copy_nb_elems >= vq->size ||
2489                                         (hdr && cur == m))) {
2490                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2491                                                 mbuf_offset),
2492                                         (void *)((uintptr_t)(buf_addr +
2493                                                         buf_offset)), cpy_len);
2494                 } else {
2495                         batch_copy[vq->batch_copy_nb_elems].dst =
2496                                 rte_pktmbuf_mtod_offset(cur, void *,
2497                                                 mbuf_offset);
2498                         batch_copy[vq->batch_copy_nb_elems].src =
2499                                 (void *)((uintptr_t)(buf_addr + buf_offset));
2500                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
2501                         vq->batch_copy_nb_elems++;
2502                 }
2503
2504                 mbuf_avail  -= cpy_len;
2505                 mbuf_offset += cpy_len;
2506                 buf_avail -= cpy_len;
2507                 buf_offset += cpy_len;
2508
2509                 /* This buf has reached its end, get the next one */
2510                 if (buf_avail == 0) {
2511                         if (++vec_idx >= nr_vec)
2512                                 break;
2513
2514                         buf_addr = buf_vec[vec_idx].buf_addr;
2515                         buf_len = buf_vec[vec_idx].buf_len;
2516
2517                         buf_offset = 0;
2518                         buf_avail  = buf_len;
2519
2520                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2521                                         (uint32_t)buf_avail, 0);
2522                 }
2523
2524                 /*
2525                  * This mbuf has reached its end, get a new one
2526                  * to hold more data.
2527                  */
2528                 if (mbuf_avail == 0) {
2529                         cur = rte_pktmbuf_alloc(mbuf_pool);
2530                         if (unlikely(cur == NULL)) {
2531                                 VHOST_LOG_DATA(ERR, "Failed to "
2532                                         "allocate memory for mbuf.\n");
2533                                 error = -1;
2534                                 goto out;
2535                         }
2536
2537                         prev->next = cur;
2538                         prev->data_len = mbuf_offset;
2539                         m->nb_segs += 1;
2540                         m->pkt_len += mbuf_offset;
2541                         prev = cur;
2542
2543                         mbuf_offset = 0;
2544                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2545                 }
2546         }
2547
2548         prev->data_len = mbuf_offset;
2549         m->pkt_len    += mbuf_offset;
2550
2551         if (hdr)
2552                 vhost_dequeue_offload(hdr, m, legacy_ol_flags);
2553
2554 out:
2555
2556         return error;
2557 }
2558
2559 static void
2560 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2561 {
2562         rte_free(opaque);
2563 }
2564
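/*
 * Allocate an external buffer large enough for 'size' bytes plus headroom
 * and shared info, and attach it to the given mbuf.
 */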
2565 static int
2566 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
2567 {
2568         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2569         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2570         uint16_t buf_len;
2571         rte_iova_t iova;
2572         void *buf;
2573
2574         total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2575         total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2576
2577         if (unlikely(total_len > UINT16_MAX))
2578                 return -ENOSPC;
2579
2580         buf_len = total_len;
2581         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2582         if (unlikely(buf == NULL))
2583                 return -ENOMEM;
2584
2585         /* Initialize shinfo */
2586         shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2587                                                 virtio_dev_extbuf_free, buf);
2588         if (unlikely(shinfo == NULL)) {
2589                 rte_free(buf);
2590                 VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
2591                 return -1;
2592         }
2593
2594         iova = rte_malloc_virt2iova(buf);
2595         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2596         rte_pktmbuf_reset_headroom(pkt);
2597
2598         return 0;
2599 }
2600
2601 /*
2602  * Prepare a host supported pktmbuf.
2603  */
2604 static __rte_always_inline int
2605 virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
2606                          uint32_t data_len)
2607 {
2608         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2609                 return 0;
2610
2611         /* attach an external buffer if supported */
2612         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
2613                 return 0;
2614
2615         /* check if chained buffers are allowed */
2616         if (!dev->linearbuf)
2617                 return 0;
2618
2619         return -1;
2620 }
2621
2622 __rte_always_inline
2623 static uint16_t
2624 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2625         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
2626         bool legacy_ol_flags)
2627 {
2628         uint16_t i;
2629         uint16_t free_entries;
2630         uint16_t dropped = 0;
2631         static bool allocerr_warned;
2632
2633         /*
2634          * The ordering between avail index and
2635          * desc reads needs to be enforced.
2636          */
2637         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2638                         vq->last_avail_idx;
2639         if (free_entries == 0)
2640                 return 0;
2641
2642         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2643
2644         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2645
2646         count = RTE_MIN(count, MAX_PKT_BURST);
2647         count = RTE_MIN(count, free_entries);
2648         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
2649                         dev->vid, count);
2650
2651         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2652                 return 0;
2653
2654         for (i = 0; i < count; i++) {
2655                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2656                 uint16_t head_idx;
2657                 uint32_t buf_len;
2658                 uint16_t nr_vec = 0;
2659                 int err;
2660
2661                 if (unlikely(fill_vec_buf_split(dev, vq,
2662                                                 vq->last_avail_idx + i,
2663                                                 &nr_vec, buf_vec,
2664                                                 &head_idx, &buf_len,
2665                                                 VHOST_ACCESS_RO) < 0))
2666                         break;
2667
2668                 update_shadow_used_ring_split(vq, head_idx, 0);
2669
2670                 err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len);
2671                 if (unlikely(err)) {
2672                         /*
2673                          * mbuf allocation fails for jumbo packets when external
2674                          * buffer allocation is not allowed and linear buffer
2675                          * is required. Drop this packet.
2676                          */
2677                         if (!allocerr_warned) {
2678                                 VHOST_LOG_DATA(ERR,
2679                                         "Failed mbuf alloc of size %d from %s on %s.\n",
2680                                         buf_len, mbuf_pool->name, dev->ifname);
2681                                 allocerr_warned = true;
2682                         }
2683                         dropped += 1;
2684                         i++;
2685                         break;
2686                 }
2687
2688                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2689                                 mbuf_pool, legacy_ol_flags);
2690                 if (unlikely(err)) {
2691                         if (!allocerr_warned) {
2692                                 VHOST_LOG_DATA(ERR,
2693                                         "Failed to copy desc to mbuf on %s.\n",
2694                                         dev->ifname);
2695                                 allocerr_warned = true;
2696                         }
2697                         dropped += 1;
2698                         i++;
2699                         break;
2700                 }
2701         }
2702
2703         if (dropped)
2704                 rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
2705
2706         vq->last_avail_idx += i;
2707
2708         do_data_copy_dequeue(vq);
2709         if (unlikely(i < count))
2710                 vq->shadow_used_idx = i;
2711         if (likely(vq->shadow_used_idx)) {
2712                 flush_shadow_used_ring_split(dev, vq);
2713                 vhost_vring_call_split(dev, vq);
2714         }
2715
2716         return (i - dropped);
2717 }
2718
2719 __rte_noinline
2720 static uint16_t
2721 virtio_dev_tx_split_legacy(struct virtio_net *dev,
2722         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2723         struct rte_mbuf **pkts, uint16_t count)
2724 {
2725         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
2726 }
2727
2728 __rte_noinline
2729 static uint16_t
2730 virtio_dev_tx_split_compliant(struct virtio_net *dev,
2731         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2732         struct rte_mbuf **pkts, uint16_t count)
2733 {
2734         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
2735 }
2736
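/*
 * Check that a full batch of descriptors is available, translate their
 * addresses and prepare the mbufs so the batch can be dequeued without
 * falling back to the single-packet path.
 */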
2737 static __rte_always_inline int
2738 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2739                                  struct vhost_virtqueue *vq,
2740                                  struct rte_mbuf **pkts,
2741                                  uint16_t avail_idx,
2742                                  uintptr_t *desc_addrs,
2743                                  uint16_t *ids)
2744 {
2745         bool wrap = vq->avail_wrap_counter;
2746         struct vring_packed_desc *descs = vq->desc_packed;
2747         uint64_t lens[PACKED_BATCH_SIZE];
2748         uint64_t buf_lens[PACKED_BATCH_SIZE];
2749         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2750         uint16_t flags, i;
2751
2752         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2753                 return -1;
2754         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2755                 return -1;
2756
2757         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2758                 flags = descs[avail_idx + i].flags;
2759                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2760                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2761                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2762                         return -1;
2763         }
2764
2765         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2766
2767         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2768                 lens[i] = descs[avail_idx + i].len;
2769
2770         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2771                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2772                                                   descs[avail_idx + i].addr,
2773                                                   &lens[i], VHOST_ACCESS_RW);
2774         }
2775
2776         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2777                 if (unlikely(!desc_addrs[i]))
2778                         return -1;
2779                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2780                         return -1;
2781         }
2782
2783         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2784                 if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
2785                         goto err;
2786         }
2787
2788         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2789                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2790
2791         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2792                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2793                         goto err;
2794         }
2795
2796         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2797                 pkts[i]->pkt_len = lens[i] - buf_offset;
2798                 pkts[i]->data_len = pkts[i]->pkt_len;
2799                 ids[i] = descs[avail_idx + i].id;
2800         }
2801
2802         return 0;
2803
2804 err:
2805         return -1;
2806 }
2807
2808 static __rte_always_inline int
2809 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2810                            struct vhost_virtqueue *vq,
2811                            struct rte_mbuf **pkts,
2812                            bool legacy_ol_flags)
2813 {
2814         uint16_t avail_idx = vq->last_avail_idx;
2815         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2816         struct virtio_net_hdr *hdr;
2817         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2818         uint16_t ids[PACKED_BATCH_SIZE];
2819         uint16_t i;
2820
2821         if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
2822                                              desc_addrs, ids))
2823                 return -1;
2824
2825         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2826                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2827
2828         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2829                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2830                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2831                            pkts[i]->pkt_len);
2832
2833         if (virtio_net_with_host_offload(dev)) {
2834                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2835                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2836                         vhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);
2837                 }
2838         }
2839
2840         if (virtio_net_is_inorder(dev))
2841                 vhost_shadow_dequeue_batch_packed_inorder(vq,
2842                         ids[PACKED_BATCH_SIZE - 1]);
2843         else
2844                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2845
2846         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2847
2848         return 0;
2849 }
2850
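/*
 * Dequeue a single packed descriptor chain: gather its buffers into
 * buf_vec, size an mbuf accordingly and copy the descriptors into it.
 * Returns -1 on translation, allocation or copy failure.
 */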
2851 static __rte_always_inline int
2852 vhost_dequeue_single_packed(struct virtio_net *dev,
2853                             struct vhost_virtqueue *vq,
2854                             struct rte_mempool *mbuf_pool,
2855                             struct rte_mbuf *pkts,
2856                             uint16_t *buf_id,
2857                             uint16_t *desc_count,
2858                             bool legacy_ol_flags)
2859 {
2860         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2861         uint32_t buf_len;
2862         uint16_t nr_vec = 0;
2863         int err;
2864         static bool allocerr_warned;
2865
2866         if (unlikely(fill_vec_buf_packed(dev, vq,
2867                                          vq->last_avail_idx, desc_count,
2868                                          buf_vec, &nr_vec,
2869                                          buf_id, &buf_len,
2870                                          VHOST_ACCESS_RO) < 0))
2871                 return -1;
2872
2873         if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
2874                 if (!allocerr_warned) {
2875                         VHOST_LOG_DATA(ERR,
2876                                 "Failed mbuf alloc of size %u from %s on %s.\n",
2877                                 buf_len, mbuf_pool->name, dev->ifname);
2878                         allocerr_warned = true;
2879                 }
2880                 return -1;
2881         }
2882
2883         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
2884                                 mbuf_pool, legacy_ol_flags);
2885         if (unlikely(err)) {
2886                 if (!allocerr_warned) {
2887                         VHOST_LOG_DATA(ERR,
2888                                 "Failed to copy desc to mbuf on %s.\n",
2889                                 dev->ifname);
2890                         allocerr_warned = true;
2891                 }
2892                 return -1;
2893         }
2894
2895         return 0;
2896 }
2897
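/*
 * Single-descriptor dequeue path. Even if the copy fails, any descriptors
 * already resolved are still returned to the guest: the shadow used ring
 * is updated and last_avail_idx advanced whenever desc_count is non-zero.
 */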
2898 static __rte_always_inline int
2899 virtio_dev_tx_single_packed(struct virtio_net *dev,
2900                             struct vhost_virtqueue *vq,
2901                             struct rte_mempool *mbuf_pool,
2902                             struct rte_mbuf *pkts,
2903                             bool legacy_ol_flags)
2904 {
2905
2906         uint16_t buf_id, desc_count = 0;
2907         int ret;
2908
2909         ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2910                                         &desc_count, legacy_ol_flags);
2911
2912         if (likely(desc_count > 0)) {
2913                 if (virtio_net_is_inorder(dev))
2914                         vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
2915                                                                    desc_count);
2916                 else
2917                         vhost_shadow_dequeue_single_packed(vq, buf_id,
2918                                         desc_count);
2919
2920                 vq_inc_last_avail_packed(vq, desc_count);
2921         }
2922
2923         return ret;
2924 }
2925
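/*
 * Main packed-ring dequeue loop: bulk-allocate 'count' mbufs up front,
 * try the batched path first and fall back to the single-descriptor path,
 * free the mbufs that were not filled, then flush the shadow used ring
 * and notify the guest if needed.
 */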
2926 __rte_always_inline
2927 static uint16_t
2928 virtio_dev_tx_packed(struct virtio_net *dev,
2929                      struct vhost_virtqueue *__rte_restrict vq,
2930                      struct rte_mempool *mbuf_pool,
2931                      struct rte_mbuf **__rte_restrict pkts,
2932                      uint32_t count,
2933                      bool legacy_ol_flags)
2934 {
2935         uint32_t pkt_idx = 0;
2936
2937         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2938                 return 0;
2939
2940         do {
2941                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2942
2943                 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
2944                         if (!virtio_dev_tx_batch_packed(dev, vq,
2945                                                         &pkts[pkt_idx],
2946                                                         legacy_ol_flags)) {
2947                                 pkt_idx += PACKED_BATCH_SIZE;
2948                                 continue;
2949                         }
2950                 }
2951
2952                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2953                                                 pkts[pkt_idx],
2954                                                 legacy_ol_flags))
2955                         break;
2956                 pkt_idx++;
2957         } while (pkt_idx < count);
2958
2959         if (pkt_idx != count)
2960                 rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
2961
2962         if (vq->shadow_used_idx) {
2963                 do_data_copy_dequeue(vq);
2964
2965                 vhost_flush_dequeue_shadow_packed(dev, vq);
2966                 vhost_vring_call_packed(dev, vq);
2967         }
2968
2969         return pkt_idx;
2970 }
2971
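/*
 * The two __rte_noinline wrappers below pass legacy_ol_flags as a
 * compile-time constant, so the always-inlined virtio_dev_tx_packed()
 * body is specialized per offload-flag convention instead of branching
 * at run time.
 */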
2972 __rte_noinline
2973 static uint16_t
2974 virtio_dev_tx_packed_legacy(struct virtio_net *dev,
2975         struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
2976         struct rte_mbuf **__rte_restrict pkts, uint32_t count)
2977 {
2978         return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
2979 }
2980
2981 __rte_noinline
2982 static uint16_t
2983 virtio_dev_tx_packed_compliant(struct virtio_net *dev,
2984         struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
2985         struct rte_mbuf **__rte_restrict pkts, uint32_t count)
2986 {
2987         return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
2988 }
2989
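/*
 * Public dequeue API: validate the device and virtqueue, take the access
 * lock, optionally inject a RARP broadcast packet at the head of 'pkts',
 * then dispatch to the packed or split ring implementation.
 */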
2990 uint16_t
2991 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2992         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2993 {
2994         struct virtio_net *dev;
2995         struct rte_mbuf *rarp_mbuf = NULL;
2996         struct vhost_virtqueue *vq;
2997         int16_t success = 1;
2998
2999         dev = get_device(vid);
3000         if (!dev)
3001                 return 0;
3002
3003         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
3004                 VHOST_LOG_DATA(ERR,
3005                         "(%d) %s: built-in vhost net backend is disabled.\n",
3006                         dev->vid, __func__);
3007                 return 0;
3008         }
3009
3010         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
3011                 VHOST_LOG_DATA(ERR,
3012                         "(%d) %s: invalid virtqueue idx %d.\n",
3013                         dev->vid, __func__, queue_id);
3014                 return 0;
3015         }
3016
3017         vq = dev->virtqueue[queue_id];
3018
3019         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
3020                 return 0;
3021
3022         if (unlikely(!vq->enabled)) {
3023                 count = 0;
3024                 goto out_access_unlock;
3025         }
3026
3027         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3028                 vhost_user_iotlb_rd_lock(vq);
3029
3030         if (unlikely(!vq->access_ok))
3031                 if (unlikely(vring_translate(dev, vq) < 0)) {
3032                         count = 0;
3033                         goto out;
3034                 }
3035
3036         /*
3037          * Construct a RARP broadcast packet and inject it into the "pkts"
3038          * array, to make it look like the guest actually sent such a packet.
3039          *
3040          * Check user_send_rarp() for more information.
3041          *
3042          * broadcast_rarp shares a cacheline in the virtio_net structure
3043          * with some fields that are accessed during enqueue, and
3044          * __atomic_compare_exchange_n performs a write when it executes the
3045          * compare and exchange. This could result in false sharing between
3046          * enqueue and dequeue.
3047          *
3048          * Prevent unnecessary false sharing by reading broadcast_rarp first
3049          * and only performing the compare and exchange if the read indicates
3050          * it is likely to be set.
3051          */
3052         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
3053                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
3054                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
3055
3056                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
3057                 if (rarp_mbuf == NULL) {
3058                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
3059                         count = 0;
3060                         goto out;
3061                 }
3062                 /*
3063                  * Inject it at the head of the "pkts" array, so that the
3064                  * switch's MAC learning table gets updated first.
3065                  */
3066                 pkts[0] = rarp_mbuf;
3067                 pkts++;
3068                 count -= 1;
3069         }
3070
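        /*
         * Dispatch to the packed or split ring implementation, specialized
         * for legacy vs. virtio-compliant offload flag handling.
         */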
3071         if (vq_is_packed(dev)) {
3072                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3073                         count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
3074                 else
3075                         count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
3076         } else {
3077                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3078                         count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
3079                 else
3080                         count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
3081         }
3082
3083 out:
3084         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3085                 vhost_user_iotlb_rd_unlock(vq);
3086
3087 out_access_unlock:
3088         rte_spinlock_unlock(&vq->access_lock);
3089
3090         if (unlikely(rarp_mbuf != NULL))
3091                 count += 1;
3092
3093         return count;
3094 }
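
/*
 * Illustrative usage sketch (not part of this file): a minimal polling
 * loop an application might run on top of rte_vhost_dequeue_burst().
 * The names "vid" and "mbuf_pool", and queue index 1 (the guest TX queue
 * in a single-queue setup), are assumptions for the example only.
 *
 *      struct rte_mbuf *pkts[32];
 *      uint16_t i, nb;
 *
 *      nb = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts, 32);
 *      for (i = 0; i < nb; i++) {
 *              // forward pkts[i] here; this sketch simply drops it
 *              rte_pktmbuf_free(pkts[i]);
 *      }
 */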