vhost: add statistics for in-flight packets
lib/vhost/virtio_net.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_net.h>
12 #include <rte_ether.h>
13 #include <rte_ip.h>
14 #include <rte_dmadev.h>
15 #include <rte_vhost.h>
16 #include <rte_tcp.h>
17 #include <rte_udp.h>
18 #include <rte_sctp.h>
19 #include <rte_arp.h>
20 #include <rte_spinlock.h>
21 #include <rte_malloc.h>
22 #include <rte_vhost_async.h>
23
24 #include "iotlb.h"
25 #include "vhost.h"
26
27 #define MAX_BATCH_LEN 256
28
29 /* DMA device copy operation tracking array. */
30 struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
31
32 static __rte_always_inline bool
33 rxvq_is_mergeable(struct virtio_net *dev)
34 {
35         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
36 }
37
38 static __rte_always_inline bool
39 virtio_net_is_inorder(struct virtio_net *dev)
40 {
41         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
42 }
43
44 static bool
45 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
46 {
47         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
48 }
49
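/*
 * Per-virtqueue statistics: vhost_queue_stats_update() below accumulates
 * packet/byte counters, power-of-two size bins and multicast/broadcast
 * counters into vq->stats for every burst that completes. It is a no-op
 * unless the device was registered with statistics collection enabled
 * (VIRTIO_DEV_STATS_ENABLED).
 *
 * A minimal sketch of how an application could read these counters,
 * assuming the rte_vhost_vring_stats_get_names()/rte_vhost_vring_stats_get()
 * API from rte_vhost.h (array sizes and variable names are illustrative,
 * not taken from this file):
 *
 *	struct rte_vhost_stat_name names[64];
 *	struct rte_vhost_stat stats[64];
 *	int n = rte_vhost_vring_stats_get_names(vid, queue_id, names, 64);
 *	if (n > 0 && rte_vhost_vring_stats_get(vid, queue_id, stats, n) == n) {
 *		for (int i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n", names[i].name, stats[i].value);
 *	}
 */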
50 /*
51  * This function must be called with virtqueue's access_lock taken.
52  */
53 static inline void
54 vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq,
55                 struct rte_mbuf **pkts, uint16_t count)
56 {
57         struct virtqueue_stats *stats = &vq->stats;
58         int i;
59
60         if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
61                 return;
62
63         for (i = 0; i < count; i++) {
64                 struct rte_ether_addr *ea;
65                 struct rte_mbuf *pkt = pkts[i];
66                 uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt);
67
68                 stats->packets++;
69                 stats->bytes += pkt_len;
70
71                 if (pkt_len == 64) {
72                         stats->size_bins[1]++;
73                 } else if (pkt_len > 64 && pkt_len < 1024) {
74                         uint32_t bin;
75
76                         /* count leading zeros to pick the power-of-two bin: 65-127 -> bin 2, ..., 512-1023 -> bin 5 */
77                         bin = (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) - 5;
78                         stats->size_bins[bin]++;
79                 } else {
80                         if (pkt_len < 64)
81                                 stats->size_bins[0]++;
82                         else if (pkt_len < 1519)
83                                 stats->size_bins[6]++;
84                         else
85                                 stats->size_bins[7]++;
86                 }
87
88                 ea = rte_pktmbuf_mtod(pkt, struct rte_ether_addr *);
89                 if (rte_is_multicast_ether_addr(ea)) {
90                         if (rte_is_broadcast_ether_addr(ea))
91                                 stats->broadcast++;
92                         else
93                                 stats->multicast++;
94                 }
95         }
96 }
97
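/*
 * Enqueue all segments of a single packet (one vhost_iov_iter) on the given
 * DMA vchannel. The completion flag address is recorded only in the ring
 * slot of the last rte_dma_copy() issued, so completing that slot implies
 * the whole packet has been copied. Returns the number of copies enqueued,
 * or -1 if the vchannel lacks capacity or a copy request fails.
 */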
98 static __rte_always_inline int64_t
99 vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
100                 int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
101                 struct vhost_iov_iter *pkt)
102 {
103         struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
104         uint16_t ring_mask = dma_info->ring_mask;
105         static bool vhost_async_dma_copy_log;
106
107
108         struct vhost_iovec *iov = pkt->iov;
109         int copy_idx = 0;
110         uint32_t nr_segs = pkt->nr_segs;
111         uint16_t i;
112
113         if (rte_dma_burst_capacity(dma_id, vchan_id) < nr_segs)
114                 return -1;
115
116         for (i = 0; i < nr_segs; i++) {
117                 copy_idx = rte_dma_copy(dma_id, vchan_id, (rte_iova_t)iov[i].src_addr,
118                                 (rte_iova_t)iov[i].dst_addr, iov[i].len, RTE_DMA_OP_FLAG_LLC);
119                 /**
120                  * Since all memory is pinned and the DMA vChannel
121                  * ring has enough space, failure should be rare.
122                  * If a failure does happen, it means the DMA device
123                  * has hit a serious error; in this case, stop the
124                  * async data path and check what has happened to
125                  * the DMA device.
126                  */
127                 if (unlikely(copy_idx < 0)) {
128                         if (!vhost_async_dma_copy_log) {
129                                 VHOST_LOG_DATA(ERR, "(%s) DMA copy failed for channel %d:%u\n",
130                                                 dev->ifname, dma_id, vchan_id);
131                                 vhost_async_dma_copy_log = true;
132                         }
133                         return -1;
134                 }
135         }
136
137         /**
138          * Only store the packet completion flag address in the last
139          * copy's slot; the other slots are left NULL.
140          */
141         dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = &vq->async->pkts_cmpl_flag[flag_idx];
142
143         return nr_segs;
144 }
145
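/*
 * Enqueue a burst of packets on the DMA vchannel and submit them in one go.
 * Stops at the first packet that cannot be fully enqueued and returns the
 * number of packets accepted; only those packets are considered in flight.
 * The per-vchannel spinlock serialises users sharing the same DMA vchannel.
 *
 * A minimal sketch of the application-side async flow, assuming the
 * rte_vhost_async_* and async enqueue APIs declared in rte_vhost_async.h
 * (DMA device setup and error handling are omitted; names are illustrative):
 *
 *	rte_vhost_async_dma_configure(dma_id, vchan_id);
 *	rte_vhost_async_channel_register(vid, queue_id);
 *	...
 *	nb_enq = rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, nb_rx,
 *						dma_id, vchan_id);
 *	nb_done = rte_vhost_poll_enqueue_completed(vid, queue_id, done_pkts,
 *						   MAX_PKT_BURST, dma_id, vchan_id);
 *	rte_pktmbuf_free_bulk(done_pkts, nb_done);
 */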
146 static __rte_always_inline uint16_t
147 vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
148                 int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
149                 struct vhost_iov_iter *pkts, uint16_t nr_pkts)
150 {
151         struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
152         int64_t ret, nr_copies = 0;
153         uint16_t pkt_idx;
154
155         rte_spinlock_lock(&dma_info->dma_lock);
156
157         for (pkt_idx = 0; pkt_idx < nr_pkts; pkt_idx++) {
158                 ret = vhost_async_dma_transfer_one(dev, vq, dma_id, vchan_id, head_idx,
159                                 &pkts[pkt_idx]);
160                 if (unlikely(ret < 0))
161                         break;
162
163                 nr_copies += ret;
164                 head_idx++;
165                 if (head_idx >= vq->size)
166                         head_idx -= vq->size;
167         }
168
169         if (likely(nr_copies > 0))
170                 rte_dma_submit(dma_id, vchan_id);
171
172         rte_spinlock_unlock(&dma_info->dma_lock);
173
174         return pkt_idx;
175 }
176
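/*
 * Poll the DMA vchannel for finished copies and flip the per-packet
 * completion flags recorded by vhost_async_dma_transfer_one(). Note that
 * the return value is the number of completed copies (descriptors), not
 * packets; packet completion is observed through pkts_cmpl_flag.
 */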
177 static __rte_always_inline uint16_t
178 vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t vchan_id,
179                 uint16_t max_pkts)
180 {
181         struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
182         uint16_t ring_mask = dma_info->ring_mask;
183         uint16_t last_idx = 0;
184         uint16_t nr_copies;
185         uint16_t copy_idx;
186         uint16_t i;
187         bool has_error = false;
188         static bool vhost_async_dma_complete_log;
189
190         rte_spinlock_lock(&dma_info->dma_lock);
191
192         /**
193          * If the DMA device reports an error during the transfer, log it
194          * for debugging. Errors are not handled at the vhost level.
195          */
196         nr_copies = rte_dma_completed(dma_id, vchan_id, max_pkts, &last_idx, &has_error);
197         if (unlikely(!vhost_async_dma_complete_log && has_error)) {
198                 VHOST_LOG_DATA(ERR, "(%s) DMA completion failure on channel %d:%u\n", dev->ifname,
199                                 dma_id, vchan_id);
200                 vhost_async_dma_complete_log = true;
201         } else if (nr_copies == 0) {
202                 goto out;
203         }
204
205         copy_idx = last_idx - nr_copies + 1;
206         for (i = 0; i < nr_copies; i++) {
207                 bool *flag;
208
209                 flag = dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask];
210                 if (flag) {
211                         /**
212                          * Mark the packet flag as received. The flag
213                          * could belong to another virtqueue, but the
214                          * write is atomic.
215                          */
216                         *flag = true;
217                         dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = NULL;
218                 }
219                 copy_idx++;
220         }
221
222 out:
223         rte_spinlock_unlock(&dma_info->dma_lock);
224         return nr_copies;
225 }
226
227 static inline void
228 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
229 {
230         struct batch_copy_elem *elem = vq->batch_copy_elems;
231         uint16_t count = vq->batch_copy_nb_elems;
232         int i;
233
234         for (i = 0; i < count; i++) {
235                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
236                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
237                                            elem[i].len);
238                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
239         }
240
241         vq->batch_copy_nb_elems = 0;
242 }
243
244 static inline void
245 do_data_copy_dequeue(struct vhost_virtqueue *vq)
246 {
247         struct batch_copy_elem *elem = vq->batch_copy_elems;
248         uint16_t count = vq->batch_copy_nb_elems;
249         int i;
250
251         for (i = 0; i < count; i++)
252                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
253
254         vq->batch_copy_nb_elems = 0;
255 }
256
257 static __rte_always_inline void
258 do_flush_shadow_used_ring_split(struct virtio_net *dev,
259                         struct vhost_virtqueue *vq,
260                         uint16_t to, uint16_t from, uint16_t size)
261 {
262         rte_memcpy(&vq->used->ring[to],
263                         &vq->shadow_used_split[from],
264                         size * sizeof(struct vring_used_elem));
265         vhost_log_cache_used_vring(dev, vq,
266                         offsetof(struct vring_used, ring[to]),
267                         size * sizeof(struct vring_used_elem));
268 }
269
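/*
 * The shadow used ring batches used-element updates in host memory and
 * flushes them to the guest-visible used ring in at most two contiguous
 * copies (handling the wrap-around), followed by a release store of
 * used->idx so the guest never observes the index ahead of the entries.
 */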
270 static __rte_always_inline void
271 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
272 {
273         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
274
275         if (used_idx + vq->shadow_used_idx <= vq->size) {
276                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
277                                           vq->shadow_used_idx);
278         } else {
279                 uint16_t size;
280
281                 /* update the used ring interval [used_idx, vq->size) */
282                 size = vq->size - used_idx;
283                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
284
285                 /* wrap around: update the remaining interval [0, shadow_used_idx - size) */
286                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
287                                           vq->shadow_used_idx - size);
288         }
289         vq->last_used_idx += vq->shadow_used_idx;
290
291         vhost_log_cache_sync(dev, vq);
292
293         __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
294                            __ATOMIC_RELEASE);
295         vq->shadow_used_idx = 0;
296         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
297                 sizeof(vq->used->idx));
298 }
299
300 static __rte_always_inline void
301 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
302                          uint16_t desc_idx, uint32_t len)
303 {
304         uint16_t i = vq->shadow_used_idx++;
305
306         vq->shadow_used_split[i].id  = desc_idx;
307         vq->shadow_used_split[i].len = len;
308 }
309
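/*
 * Flush the shadow used entries for a packed ring. IDs and lengths are
 * written first, then a release fence, then the flags; the head
 * descriptor's flags are written last so the guest cannot observe a
 * partially updated batch.
 */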
310 static __rte_always_inline void
311 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
312                                   struct vhost_virtqueue *vq)
313 {
314         int i;
315         uint16_t used_idx = vq->last_used_idx;
316         uint16_t head_idx = vq->last_used_idx;
317         uint16_t head_flags = 0;
318
319         /* Split loop in two to save memory barriers */
320         for (i = 0; i < vq->shadow_used_idx; i++) {
321                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
322                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
323
324                 used_idx += vq->shadow_used_packed[i].count;
325                 if (used_idx >= vq->size)
326                         used_idx -= vq->size;
327         }
328
329         /* The ordering for storing desc flags needs to be enforced. */
330         rte_atomic_thread_fence(__ATOMIC_RELEASE);
331
332         for (i = 0; i < vq->shadow_used_idx; i++) {
333                 uint16_t flags;
334
335                 if (vq->shadow_used_packed[i].len)
336                         flags = VRING_DESC_F_WRITE;
337                 else
338                         flags = 0;
339
340                 if (vq->used_wrap_counter) {
341                         flags |= VRING_DESC_F_USED;
342                         flags |= VRING_DESC_F_AVAIL;
343                 } else {
344                         flags &= ~VRING_DESC_F_USED;
345                         flags &= ~VRING_DESC_F_AVAIL;
346                 }
347
348                 if (i > 0) {
349                         vq->desc_packed[vq->last_used_idx].flags = flags;
350
351                         vhost_log_cache_used_vring(dev, vq,
352                                         vq->last_used_idx *
353                                         sizeof(struct vring_packed_desc),
354                                         sizeof(struct vring_packed_desc));
355                 } else {
356                         head_idx = vq->last_used_idx;
357                         head_flags = flags;
358                 }
359
360                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
361         }
362
363         vq->desc_packed[head_idx].flags = head_flags;
364
365         vhost_log_cache_used_vring(dev, vq,
366                                 head_idx *
367                                 sizeof(struct vring_packed_desc),
368                                 sizeof(struct vring_packed_desc));
369
370         vq->shadow_used_idx = 0;
371         vhost_log_cache_sync(dev, vq);
372 }
373
374 static __rte_always_inline void
375 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
376                                   struct vhost_virtqueue *vq)
377 {
378         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
379
380         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
381         /* the desc flags field is the synchronization point for the virtio packed vring */
382         __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
383                          used_elem->flags, __ATOMIC_RELEASE);
384
385         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
386                                    sizeof(struct vring_packed_desc),
387                                    sizeof(struct vring_packed_desc));
388         vq->shadow_used_idx = 0;
389         vhost_log_cache_sync(dev, vq);
390 }
391
392 static __rte_always_inline void
393 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
394                                  struct vhost_virtqueue *vq,
395                                  uint64_t *lens,
396                                  uint16_t *ids)
397 {
398         uint16_t i;
399         uint16_t flags;
400         uint16_t last_used_idx;
401         struct vring_packed_desc *desc_base;
402
403         last_used_idx = vq->last_used_idx;
404         desc_base = &vq->desc_packed[last_used_idx];
405
406         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
407
408         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
409                 desc_base[i].id = ids[i];
410                 desc_base[i].len = lens[i];
411         }
412
413         rte_atomic_thread_fence(__ATOMIC_RELEASE);
414
415         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
416                 desc_base[i].flags = flags;
417         }
418
419         vhost_log_cache_used_vring(dev, vq, last_used_idx *
420                                    sizeof(struct vring_packed_desc),
421                                    sizeof(struct vring_packed_desc) *
422                                    PACKED_BATCH_SIZE);
423         vhost_log_cache_sync(dev, vq);
424
425         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
426 }
427
428 static __rte_always_inline void
429 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
430                                           uint16_t id)
431 {
432         vq->shadow_used_packed[0].id = id;
433
434         if (!vq->shadow_used_idx) {
435                 vq->shadow_last_used_idx = vq->last_used_idx;
436                 vq->shadow_used_packed[0].flags =
437                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
438                 vq->shadow_used_packed[0].len = 0;
439                 vq->shadow_used_packed[0].count = 1;
440                 vq->shadow_used_idx++;
441         }
442
443         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
444 }
445
446 static __rte_always_inline void
447 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
448                                   struct vhost_virtqueue *vq,
449                                   uint16_t *ids)
450 {
451         uint16_t flags;
452         uint16_t i;
453         uint16_t begin;
454
455         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
456
457         if (!vq->shadow_used_idx) {
458                 vq->shadow_last_used_idx = vq->last_used_idx;
459                 vq->shadow_used_packed[0].id  = ids[0];
460                 vq->shadow_used_packed[0].len = 0;
461                 vq->shadow_used_packed[0].count = 1;
462                 vq->shadow_used_packed[0].flags = flags;
463                 vq->shadow_used_idx++;
464                 begin = 1;
465         } else
466                 begin = 0;
467
468         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
469                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
470                 vq->desc_packed[vq->last_used_idx + i].len = 0;
471         }
472
473         rte_atomic_thread_fence(__ATOMIC_RELEASE);
474         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
475                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
476
477         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
478                                    sizeof(struct vring_packed_desc),
479                                    sizeof(struct vring_packed_desc) *
480                                    PACKED_BATCH_SIZE);
481         vhost_log_cache_sync(dev, vq);
482
483         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
484 }
485
486 static __rte_always_inline void
487 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
488                                    uint16_t buf_id,
489                                    uint16_t count)
490 {
491         uint16_t flags;
492
493         flags = vq->desc_packed[vq->last_used_idx].flags;
494         if (vq->used_wrap_counter) {
495                 flags |= VRING_DESC_F_USED;
496                 flags |= VRING_DESC_F_AVAIL;
497         } else {
498                 flags &= ~VRING_DESC_F_USED;
499                 flags &= ~VRING_DESC_F_AVAIL;
500         }
501
502         if (!vq->shadow_used_idx) {
503                 vq->shadow_last_used_idx = vq->last_used_idx;
504
505                 vq->shadow_used_packed[0].id  = buf_id;
506                 vq->shadow_used_packed[0].len = 0;
507                 vq->shadow_used_packed[0].flags = flags;
508                 vq->shadow_used_idx++;
509         } else {
510                 vq->desc_packed[vq->last_used_idx].id = buf_id;
511                 vq->desc_packed[vq->last_used_idx].len = 0;
512                 vq->desc_packed[vq->last_used_idx].flags = flags;
513         }
514
515         vq_inc_last_used_packed(vq, count);
516 }
517
518 static __rte_always_inline void
519 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
520                                            uint16_t buf_id,
521                                            uint16_t count)
522 {
523         uint16_t flags;
524
525         vq->shadow_used_packed[0].id = buf_id;
526
527         flags = vq->desc_packed[vq->last_used_idx].flags;
528         if (vq->used_wrap_counter) {
529                 flags |= VRING_DESC_F_USED;
530                 flags |= VRING_DESC_F_AVAIL;
531         } else {
532                 flags &= ~VRING_DESC_F_USED;
533                 flags &= ~VRING_DESC_F_AVAIL;
534         }
535
536         if (!vq->shadow_used_idx) {
537                 vq->shadow_last_used_idx = vq->last_used_idx;
538                 vq->shadow_used_packed[0].len = 0;
539                 vq->shadow_used_packed[0].flags = flags;
540                 vq->shadow_used_idx++;
541         }
542
543         vq_inc_last_used_packed(vq, count);
544 }
545
546 static __rte_always_inline void
547 vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
548                                    uint32_t *len,
549                                    uint16_t *id,
550                                    uint16_t *count,
551                                    uint16_t num_buffers)
552 {
553         uint16_t i;
554
555         for (i = 0; i < num_buffers; i++) {
556                 /* enqueue shadow flush action aligned with batch num */
557                 if (!vq->shadow_used_idx)
558                         vq->shadow_aligned_idx = vq->last_used_idx &
559                                 PACKED_BATCH_MASK;
560                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
561                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
562                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
563                 vq->shadow_aligned_idx += count[i];
564                 vq->shadow_used_idx++;
565         }
566 }
567
568 static __rte_always_inline void
569 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
570                                    struct vhost_virtqueue *vq,
571                                    uint32_t *len,
572                                    uint16_t *id,
573                                    uint16_t *count,
574                                    uint16_t num_buffers)
575 {
576         vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
577
578         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
579                 do_data_copy_enqueue(dev, vq);
580                 vhost_flush_enqueue_shadow_packed(dev, vq);
581         }
582 }
583
584 /* avoid the write when the value is already equal, to lessen cache issues */
585 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
586         if ((var) != (val))                     \
587                 (var) = (val);                  \
588 } while (0)
589
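/*
 * Translate mbuf TX offload flags into the virtio_net_hdr the guest will
 * parse: L4 checksum requests become VIRTIO_NET_HDR_F_NEEDS_CSUM with
 * csum_start/csum_offset, TSO/UFO requests fill gso_type/gso_size/hdr_len,
 * and an IPv4 header checksum is computed in place since the guest cannot
 * be asked to do it. For example, for TCP over IPv4 with a 14-byte Ethernet
 * header and a 20-byte IP header, csum_start is 34 and csum_offset is
 * offsetof(struct rte_tcp_hdr, cksum), i.e. 16.
 */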
590 static __rte_always_inline void
591 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
592 {
593         uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
594
595         if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
596                 csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
597
598         if (csum_l4) {
599                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
600                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
601
602                 switch (csum_l4) {
603                 case RTE_MBUF_F_TX_TCP_CKSUM:
604                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
605                                                 cksum));
606                         break;
607                 case RTE_MBUF_F_TX_UDP_CKSUM:
608                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
609                                                 dgram_cksum));
610                         break;
611                 case RTE_MBUF_F_TX_SCTP_CKSUM:
612                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
613                                                 cksum));
614                         break;
615                 }
616         } else {
617                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
618                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
619                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
620         }
621
622         /* IP cksum verification cannot be bypassed, so calculate it here */
623         if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
624                 struct rte_ipv4_hdr *ipv4_hdr;
625
626                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
627                                                    m_buf->l2_len);
628                 ipv4_hdr->hdr_checksum = 0;
629                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
630         }
631
632         if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
633                 if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
634                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
635                 else
636                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
637                 net_hdr->gso_size = m_buf->tso_segsz;
638                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
639                                         + m_buf->l4_len;
640         } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
641                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
642                 net_hdr->gso_size = m_buf->tso_segsz;
643                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
644                         m_buf->l4_len;
645         } else {
646                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
647                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
648                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
649         }
650 }
651
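/*
 * Map one guest buffer (IOVA + length) into host virtual address chunks.
 * A single descriptor may span several host-contiguous regions, so the
 * result can occupy multiple buf_vec entries; fails if BUF_VECTOR_MAX is
 * exceeded or the IOVA cannot be translated.
 */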
652 static __rte_always_inline int
653 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
654                 struct buf_vector *buf_vec, uint16_t *vec_idx,
655                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
656 {
657         uint16_t vec_id = *vec_idx;
658
659         while (desc_len) {
660                 uint64_t desc_addr;
661                 uint64_t desc_chunck_len = desc_len;
662
663                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
664                         return -1;
665
666                 desc_addr = vhost_iova_to_vva(dev, vq,
667                                 desc_iova,
668                                 &desc_chunck_len,
669                                 perm);
670                 if (unlikely(!desc_addr))
671                         return -1;
672
673                 rte_prefetch0((void *)(uintptr_t)desc_addr);
674
675                 buf_vec[vec_id].buf_iova = desc_iova;
676                 buf_vec[vec_id].buf_addr = desc_addr;
677                 buf_vec[vec_id].buf_len  = desc_chunck_len;
678
679                 desc_len -= desc_chunck_len;
680                 desc_iova += desc_chunck_len;
681                 vec_id++;
682         }
683         *vec_idx = vec_id;
684
685         return 0;
686 }
687
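/*
 * Walk one descriptor chain of a split ring, starting from the avail ring
 * entry at avail_idx, and collect its buffers into buf_vec. Indirect tables
 * are mapped (and copied if they are not contiguous in process VA space);
 * the chain head index and total length are returned to the caller.
 */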
688 static __rte_always_inline int
689 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
690                          uint32_t avail_idx, uint16_t *vec_idx,
691                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
692                          uint32_t *desc_chain_len, uint8_t perm)
693 {
694         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
695         uint16_t vec_id = *vec_idx;
696         uint32_t len    = 0;
697         uint64_t dlen;
698         uint32_t nr_descs = vq->size;
699         uint32_t cnt    = 0;
700         struct vring_desc *descs = vq->desc;
701         struct vring_desc *idesc = NULL;
702
703         if (unlikely(idx >= vq->size))
704                 return -1;
705
706         *desc_chain_head = idx;
707
708         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
709                 dlen = vq->desc[idx].len;
710                 nr_descs = dlen / sizeof(struct vring_desc);
711                 if (unlikely(nr_descs > vq->size))
712                         return -1;
713
714                 descs = (struct vring_desc *)(uintptr_t)
715                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
716                                                 &dlen,
717                                                 VHOST_ACCESS_RO);
718                 if (unlikely(!descs))
719                         return -1;
720
721                 if (unlikely(dlen < vq->desc[idx].len)) {
722                         /*
723                          * The indirect desc table is not contiguous
724                          * in process VA space, so we have to copy it.
725                          */
726                         idesc = vhost_alloc_copy_ind_table(dev, vq,
727                                         vq->desc[idx].addr, vq->desc[idx].len);
728                         if (unlikely(!idesc))
729                                 return -1;
730
731                         descs = idesc;
732                 }
733
734                 idx = 0;
735         }
736
737         while (1) {
738                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
739                         free_ind_table(idesc);
740                         return -1;
741                 }
742
743                 dlen = descs[idx].len;
744                 len += dlen;
745
746                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
747                                                 descs[idx].addr, dlen,
748                                                 perm))) {
749                         free_ind_table(idesc);
750                         return -1;
751                 }
752
753                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
754                         break;
755
756                 idx = descs[idx].next;
757         }
758
759         *desc_chain_len = len;
760         *vec_idx = vec_id;
761
762         if (unlikely(!!idesc))
763                 free_ind_table(idesc);
764
765         return 0;
766 }
767
768 /*
769  * Returns -1 on fail, 0 on success
770  */
771 static inline int
772 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
773                                 uint32_t size, struct buf_vector *buf_vec,
774                                 uint16_t *num_buffers, uint16_t avail_head,
775                                 uint16_t *nr_vec)
776 {
777         uint16_t cur_idx;
778         uint16_t vec_idx = 0;
779         uint16_t max_tries, tries = 0;
780
781         uint16_t head_idx = 0;
782         uint32_t len = 0;
783
784         *num_buffers = 0;
785         cur_idx  = vq->last_avail_idx;
786
787         if (rxvq_is_mergeable(dev))
788                 max_tries = vq->size - 1;
789         else
790                 max_tries = 1;
791
792         while (size > 0) {
793                 if (unlikely(cur_idx == avail_head))
794                         return -1;
795                 /*
796                  * If we have tried all available ring items and still
797                  * can't get enough buffers, something abnormal has
798                  * happened.
799                  */
800                 if (unlikely(++tries > max_tries))
801                         return -1;
802
803                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
804                                                 &vec_idx, buf_vec,
805                                                 &head_idx, &len,
806                                                 VHOST_ACCESS_RW) < 0))
807                         return -1;
808                 len = RTE_MIN(len, size);
809                 update_shadow_used_ring_split(vq, head_idx, len);
810                 size -= len;
811
812                 cur_idx++;
813                 *num_buffers += 1;
814         }
815
816         *nr_vec = vec_idx;
817
818         return 0;
819 }
820
821 static __rte_always_inline int
822 fill_vec_buf_packed_indirect(struct virtio_net *dev,
823                         struct vhost_virtqueue *vq,
824                         struct vring_packed_desc *desc, uint16_t *vec_idx,
825                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
826 {
827         uint16_t i;
828         uint32_t nr_descs;
829         uint16_t vec_id = *vec_idx;
830         uint64_t dlen;
831         struct vring_packed_desc *descs, *idescs = NULL;
832
833         dlen = desc->len;
834         descs = (struct vring_packed_desc *)(uintptr_t)
835                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
836         if (unlikely(!descs))
837                 return -1;
838
839         if (unlikely(dlen < desc->len)) {
840                 /*
841                  * The indirect desc table is not contiguous
842                  * in process VA space, so we have to copy it.
843                  */
844                 idescs = vhost_alloc_copy_ind_table(dev,
845                                 vq, desc->addr, desc->len);
846                 if (unlikely(!idescs))
847                         return -1;
848
849                 descs = idescs;
850         }
851
852         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
853         if (unlikely(nr_descs >= vq->size)) {
854                 free_ind_table(idescs);
855                 return -1;
856         }
857
858         for (i = 0; i < nr_descs; i++) {
859                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
860                         free_ind_table(idescs);
861                         return -1;
862                 }
863
864                 dlen = descs[i].len;
865                 *len += dlen;
866                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
867                                                 descs[i].addr, dlen,
868                                                 perm)))
869                         return -1;
870         }
871         *vec_idx = vec_id;
872
873         if (unlikely(!!idescs))
874                 free_ind_table(idescs);
875
876         return 0;
877 }
878
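/*
 * Packed-ring counterpart of fill_vec_buf_split(): starting at avail_idx,
 * collect a descriptor chain (direct or indirect) into buf_vec, tracking
 * the wrap counter and returning the buffer id and the number of
 * descriptors consumed.
 */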
879 static __rte_always_inline int
880 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
881                                 uint16_t avail_idx, uint16_t *desc_count,
882                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
883                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
884 {
885         bool wrap_counter = vq->avail_wrap_counter;
886         struct vring_packed_desc *descs = vq->desc_packed;
887         uint16_t vec_id = *vec_idx;
888         uint64_t dlen;
889
890         if (avail_idx < vq->last_avail_idx)
891                 wrap_counter ^= 1;
892
893         /*
894          * Perform a load-acquire barrier in desc_is_avail to
895          * enforce the ordering between desc flags and desc
896          * content.
897          */
898         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
899                 return -1;
900
901         *desc_count = 0;
902         *len = 0;
903
904         while (1) {
905                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
906                         return -1;
907
908                 if (unlikely(*desc_count >= vq->size))
909                         return -1;
910
911                 *desc_count += 1;
912                 *buf_id = descs[avail_idx].id;
913
914                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
915                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
916                                                         &descs[avail_idx],
917                                                         &vec_id, buf_vec,
918                                                         len, perm) < 0))
919                                 return -1;
920                 } else {
921                         dlen = descs[avail_idx].len;
922                         *len += dlen;
923
924                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
925                                                         descs[avail_idx].addr,
926                                                         dlen,
927                                                         perm)))
928                                 return -1;
929                 }
930
931                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
932                         break;
933
934                 if (++avail_idx >= vq->size) {
935                         avail_idx -= vq->size;
936                         wrap_counter ^= 1;
937                 }
938         }
939
940         *vec_idx = vec_id;
941
942         return 0;
943 }
944
945 static __rte_noinline void
946 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
947                 struct buf_vector *buf_vec,
948                 struct virtio_net_hdr_mrg_rxbuf *hdr)
949 {
950         uint64_t len;
951         uint64_t remain = dev->vhost_hlen;
952         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
953         uint64_t iova = buf_vec->buf_iova;
954
955         while (remain) {
956                 len = RTE_MIN(remain,
957                                 buf_vec->buf_len);
958                 dst = buf_vec->buf_addr;
959                 rte_memcpy((void *)(uintptr_t)dst,
960                                 (void *)(uintptr_t)src,
961                                 len);
962
963                 PRINT_PACKET(dev, (uintptr_t)dst,
964                                 (uint32_t)len, 0);
965                 vhost_log_cache_write_iova(dev, vq,
966                                 iova, len);
967
968                 remain -= len;
969                 iova += len;
970                 src += len;
971                 buf_vec++;
972         }
973 }
974
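/*
 * The async_iter_* helpers build the vhost_iov_iter/vhost_iovec arrays that
 * describe one packet's DMA copies: initialize starts a new iterator,
 * add_iovec appends a source/destination/length triple, finalize commits
 * the iterator, and cancel rolls back the vectors added for a packet that
 * could not be fully mapped.
 */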
975 static __rte_always_inline int
976 async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
977 {
978         struct vhost_iov_iter *iter;
979
980         if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
981                 VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
982                 return -1;
983         }
984
985         iter = async->iov_iter + async->iter_idx;
986         iter->iov = async->iovec + async->iovec_idx;
987         iter->nr_segs = 0;
988
989         return 0;
990 }
991
992 static __rte_always_inline int
993 async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
994                 void *src, void *dst, size_t len)
995 {
996         struct vhost_iov_iter *iter;
997         struct vhost_iovec *iovec;
998
999         if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
1000                 static bool vhost_max_async_vec_log;
1001
1002                 if (!vhost_max_async_vec_log) {
1003                         VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
1004                         vhost_max_async_vec_log = true;
1005                 }
1006
1007                 return -1;
1008         }
1009
1010         iter = async->iov_iter + async->iter_idx;
1011         iovec = async->iovec + async->iovec_idx;
1012
1013         iovec->src_addr = src;
1014         iovec->dst_addr = dst;
1015         iovec->len = len;
1016
1017         iter->nr_segs++;
1018         async->iovec_idx++;
1019
1020         return 0;
1021 }
1022
1023 static __rte_always_inline void
1024 async_iter_finalize(struct vhost_async *async)
1025 {
1026         async->iter_idx++;
1027 }
1028
1029 static __rte_always_inline void
1030 async_iter_cancel(struct vhost_async *async)
1031 {
1032         struct vhost_iov_iter *iter;
1033
1034         iter = async->iov_iter + async->iter_idx;
1035         async->iovec_idx -= iter->nr_segs;
1036         iter->nr_segs = 0;
1037         iter->iov = NULL;
1038 }
1039
1040 static __rte_always_inline void
1041 async_iter_reset(struct vhost_async *async)
1042 {
1043         async->iter_idx = 0;
1044         async->iovec_idx = 0;
1045 }
1046
1047 static __rte_always_inline int
1048 async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
1049                 struct rte_mbuf *m, uint32_t mbuf_offset,
1050                 uint64_t buf_iova, uint32_t cpy_len)
1051 {
1052         struct vhost_async *async = vq->async;
1053         uint64_t mapped_len;
1054         uint32_t buf_offset = 0;
1055         void *host_iova;
1056
1057         while (cpy_len) {
1058                 host_iova = (void *)(uintptr_t)gpa_to_first_hpa(dev,
1059                                 buf_iova + buf_offset, cpy_len, &mapped_len);
1060                 if (unlikely(!host_iova)) {
1061                         VHOST_LOG_DATA(ERR, "(%s) %s: failed to get host iova.\n",
1062                                        dev->ifname, __func__);
1063                         return -1;
1064                 }
1065
1066                 if (unlikely(async_iter_add_iovec(dev, async,
1067                                                 (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
1068                                                         mbuf_offset),
1069                                                 host_iova, (size_t)mapped_len)))
1070                         return -1;
1071
1072                 cpy_len -= (uint32_t)mapped_len;
1073                 mbuf_offset += (uint32_t)mapped_len;
1074                 buf_offset += (uint32_t)mapped_len;
1075         }
1076
1077         return 0;
1078 }
1079
1080 static __rte_always_inline void
1081 sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
1082                 struct rte_mbuf *m, uint32_t mbuf_offset,
1083                 uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len)
1084 {
1085         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1086
1087         if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) {
1088                 rte_memcpy((void *)((uintptr_t)(buf_addr)),
1089                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
1090                                 cpy_len);
1091                 vhost_log_cache_write_iova(dev, vq, buf_iova, cpy_len);
1092                 PRINT_PACKET(dev, (uintptr_t)(buf_addr), cpy_len, 0);
1093         } else {
1094                 batch_copy[vq->batch_copy_nb_elems].dst =
1095                         (void *)((uintptr_t)(buf_addr));
1096                 batch_copy[vq->batch_copy_nb_elems].src =
1097                         rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
1098                 batch_copy[vq->batch_copy_nb_elems].log_addr = buf_iova;
1099                 batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
1100                 vq->batch_copy_nb_elems++;
1101         }
1102 }
1103
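/*
 * Copy one mbuf chain into the guest buffers described by buf_vec,
 * prepending the virtio-net header (possibly split across buffers). In the
 * sync path the data is copied immediately or staged in batch_copy_elems;
 * in the async path the copies are only described via async_iter_* and
 * later handed to the DMA engine.
 */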
1104 static __rte_always_inline int
1105 mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
1106                 struct rte_mbuf *m, struct buf_vector *buf_vec,
1107                 uint16_t nr_vec, uint16_t num_buffers, bool is_async)
1108 {
1109         uint32_t vec_idx = 0;
1110         uint32_t mbuf_offset, mbuf_avail;
1111         uint32_t buf_offset, buf_avail;
1112         uint64_t buf_addr, buf_iova, buf_len;
1113         uint32_t cpy_len;
1114         uint64_t hdr_addr;
1115         struct rte_mbuf *hdr_mbuf;
1116         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
1117         struct vhost_async *async = vq->async;
1118
1119         if (unlikely(m == NULL))
1120                 return -1;
1121
1122         buf_addr = buf_vec[vec_idx].buf_addr;
1123         buf_iova = buf_vec[vec_idx].buf_iova;
1124         buf_len = buf_vec[vec_idx].buf_len;
1125
1126         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
1127                 return -1;
1128
1129         hdr_mbuf = m;
1130         hdr_addr = buf_addr;
1131         if (unlikely(buf_len < dev->vhost_hlen)) {
1132                 memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
1133                 hdr = &tmp_hdr;
1134         } else
1135                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
1136
1137         VHOST_LOG_DATA(DEBUG, "(%s) RX: num merge buffers %d\n",
1138                 dev->ifname, num_buffers);
1139
1140         if (unlikely(buf_len < dev->vhost_hlen)) {
1141                 buf_offset = dev->vhost_hlen - buf_len;
1142                 vec_idx++;
1143                 buf_addr = buf_vec[vec_idx].buf_addr;
1144                 buf_iova = buf_vec[vec_idx].buf_iova;
1145                 buf_len = buf_vec[vec_idx].buf_len;
1146                 buf_avail = buf_len - buf_offset;
1147         } else {
1148                 buf_offset = dev->vhost_hlen;
1149                 buf_avail = buf_len - dev->vhost_hlen;
1150         }
1151
1152         mbuf_avail  = rte_pktmbuf_data_len(m);
1153         mbuf_offset = 0;
1154
1155         if (is_async) {
1156                 if (async_iter_initialize(dev, async))
1157                         return -1;
1158         }
1159
1160         while (mbuf_avail != 0 || m->next != NULL) {
1161                 /* done with current buf, get the next one */
1162                 if (buf_avail == 0) {
1163                         vec_idx++;
1164                         if (unlikely(vec_idx >= nr_vec))
1165                                 goto error;
1166
1167                         buf_addr = buf_vec[vec_idx].buf_addr;
1168                         buf_iova = buf_vec[vec_idx].buf_iova;
1169                         buf_len = buf_vec[vec_idx].buf_len;
1170
1171                         buf_offset = 0;
1172                         buf_avail  = buf_len;
1173                 }
1174
1175                 /* done with current mbuf, get the next one */
1176                 if (mbuf_avail == 0) {
1177                         m = m->next;
1178
1179                         mbuf_offset = 0;
1180                         mbuf_avail  = rte_pktmbuf_data_len(m);
1181                 }
1182
1183                 if (hdr_addr) {
1184                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
1185                         if (rxvq_is_mergeable(dev))
1186                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
1187                                                 num_buffers);
1188
1189                         if (unlikely(hdr == &tmp_hdr)) {
1190                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
1191                         } else {
1192                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
1193                                                 dev->vhost_hlen, 0);
1194                                 vhost_log_cache_write_iova(dev, vq,
1195                                                 buf_vec[0].buf_iova,
1196                                                 dev->vhost_hlen);
1197                         }
1198
1199                         hdr_addr = 0;
1200                 }
1201
1202                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1203
1204                 if (is_async) {
1205                         if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
1206                                                 buf_iova + buf_offset, cpy_len) < 0)
1207                                 goto error;
1208                 } else {
1209                         sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
1210                                         buf_addr + buf_offset,
1211                                         buf_iova + buf_offset, cpy_len);
1212                 }
1213
1214                 mbuf_avail  -= cpy_len;
1215                 mbuf_offset += cpy_len;
1216                 buf_avail  -= cpy_len;
1217                 buf_offset += cpy_len;
1218         }
1219
1220         if (is_async)
1221                 async_iter_finalize(async);
1222
1223         return 0;
1224 error:
1225         if (is_async)
1226                 async_iter_cancel(async);
1227
1228         return -1;
1229 }
1230
1231 static __rte_always_inline int
1232 vhost_enqueue_single_packed(struct virtio_net *dev,
1233                             struct vhost_virtqueue *vq,
1234                             struct rte_mbuf *pkt,
1235                             struct buf_vector *buf_vec,
1236                             uint16_t *nr_descs)
1237 {
1238         uint16_t nr_vec = 0;
1239         uint16_t avail_idx = vq->last_avail_idx;
1240         uint16_t max_tries, tries = 0;
1241         uint16_t buf_id = 0;
1242         uint32_t len = 0;
1243         uint16_t desc_count;
1244         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1245         uint16_t num_buffers = 0;
1246         uint32_t buffer_len[vq->size];
1247         uint16_t buffer_buf_id[vq->size];
1248         uint16_t buffer_desc_count[vq->size];
1249
1250         if (rxvq_is_mergeable(dev))
1251                 max_tries = vq->size - 1;
1252         else
1253                 max_tries = 1;
1254
1255         while (size > 0) {
1256                 /*
1257                  * If we have tried all available ring items and still
1258                  * can't get enough buffers, something abnormal has
1259                  * happened.
1260                  */
1261                 if (unlikely(++tries > max_tries))
1262                         return -1;
1263
1264                 if (unlikely(fill_vec_buf_packed(dev, vq,
1265                                                 avail_idx, &desc_count,
1266                                                 buf_vec, &nr_vec,
1267                                                 &buf_id, &len,
1268                                                 VHOST_ACCESS_RW) < 0))
1269                         return -1;
1270
1271                 len = RTE_MIN(len, size);
1272                 size -= len;
1273
1274                 buffer_len[num_buffers] = len;
1275                 buffer_buf_id[num_buffers] = buf_id;
1276                 buffer_desc_count[num_buffers] = desc_count;
1277                 num_buffers += 1;
1278
1279                 *nr_descs += desc_count;
1280                 avail_idx += desc_count;
1281                 if (avail_idx >= vq->size)
1282                         avail_idx -= vq->size;
1283         }
1284
1285         if (mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers, false) < 0)
1286                 return -1;
1287
1288         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1289                                            buffer_desc_count, num_buffers);
1290
1291         return 0;
1292 }
1293
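/*
 * Enqueue (guest RX) path for split rings: reserve enough descriptors for
 * each packet, copy it with mbuf_to_desc(), flush the batched copies and
 * the shadow used ring, then kick the guest if needed.
 */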
1294 static __rte_noinline uint32_t
1295 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1296         struct rte_mbuf **pkts, uint32_t count)
1297 {
1298         uint32_t pkt_idx = 0;
1299         uint16_t num_buffers;
1300         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1301         uint16_t avail_head;
1302
1303         /*
1304          * The ordering between avail index and
1305          * desc reads needs to be enforced.
1306          */
1307         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1308
1309         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1310
1311         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1312                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1313                 uint16_t nr_vec = 0;
1314
1315                 if (unlikely(reserve_avail_buf_split(dev, vq,
1316                                                 pkt_len, buf_vec, &num_buffers,
1317                                                 avail_head, &nr_vec) < 0)) {
1318                         VHOST_LOG_DATA(DEBUG,
1319                                 "(%s) failed to get enough desc from vring\n",
1320                                 dev->ifname);
1321                         vq->shadow_used_idx -= num_buffers;
1322                         break;
1323                 }
1324
1325                 VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
1326                         dev->ifname, vq->last_avail_idx,
1327                         vq->last_avail_idx + num_buffers);
1328
1329                 if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
1330                                         num_buffers, false) < 0) {
1331                         vq->shadow_used_idx -= num_buffers;
1332                         break;
1333                 }
1334
1335                 vq->last_avail_idx += num_buffers;
1336         }
1337
1338         do_data_copy_enqueue(dev, vq);
1339
1340         if (likely(vq->shadow_used_idx)) {
1341                 flush_shadow_used_ring_split(dev, vq);
1342                 vhost_vring_call_split(dev, vq);
1343         }
1344
1345         return pkt_idx;
1346 }
1347
1348 static __rte_always_inline int
1349 virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
1350                            struct vhost_virtqueue *vq,
1351                            struct rte_mbuf **pkts,
1352                            uint64_t *desc_addrs,
1353                            uint64_t *lens)
1354 {
1355         bool wrap_counter = vq->avail_wrap_counter;
1356         struct vring_packed_desc *descs = vq->desc_packed;
1357         uint16_t avail_idx = vq->last_avail_idx;
1358         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1359         uint16_t i;
1360
1361         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1362                 return -1;
1363
1364         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1365                 return -1;
1366
1367         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1368                 if (unlikely(pkts[i]->next != NULL))
1369                         return -1;
1370                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1371                                             wrap_counter)))
1372                         return -1;
1373         }
1374
1375         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1376                 lens[i] = descs[avail_idx + i].len;
1377
1378         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1379                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1380                         return -1;
1381         }
1382
1383         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1384                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1385                                                   descs[avail_idx + i].addr,
1386                                                   &lens[i],
1387                                                   VHOST_ACCESS_RW);
1388
1389         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1390                 if (unlikely(!desc_addrs[i]))
1391                         return -1;
1392                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1393                         return -1;
1394         }
1395
1396         return 0;
1397 }
1398
1399 static __rte_always_inline void
1400 virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
1401                            struct vhost_virtqueue *vq,
1402                            struct rte_mbuf **pkts,
1403                            uint64_t *desc_addrs,
1404                            uint64_t *lens)
1405 {
1406         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1407         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1408         struct vring_packed_desc *descs = vq->desc_packed;
1409         uint16_t avail_idx = vq->last_avail_idx;
1410         uint16_t ids[PACKED_BATCH_SIZE];
1411         uint16_t i;
1412
1413         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1414                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1415                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1416                                         (uintptr_t)desc_addrs[i];
1417                 lens[i] = pkts[i]->pkt_len +
1418                         sizeof(struct virtio_net_hdr_mrg_rxbuf);
1419         }
1420
1421         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1422                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1423
1424         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1425
1426         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1427                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1428                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1429                            pkts[i]->pkt_len);
1430         }
1431
1432         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1433                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1434                                            lens[i]);
1435
1436         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1437                 ids[i] = descs[avail_idx + i].id;
1438
1439         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1440 }
1441
1442 static __rte_always_inline int
1443 virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
1444                            struct vhost_virtqueue *vq,
1445                            struct rte_mbuf **pkts)
1446 {
1447         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1448         uint64_t lens[PACKED_BATCH_SIZE];
1449
1450         if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
1451                 return -1;
1452
1453         if (vq->shadow_used_idx) {
1454                 do_data_copy_enqueue(dev, vq);
1455                 vhost_flush_enqueue_shadow_packed(dev, vq);
1456         }
1457
1458         virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);
1459
1460         return 0;
1461 }
1462
1463 static __rte_always_inline int16_t
1464 virtio_dev_rx_single_packed(struct virtio_net *dev,
1465                             struct vhost_virtqueue *vq,
1466                             struct rte_mbuf *pkt)
1467 {
1468         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1469         uint16_t nr_descs = 0;
1470
1471         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1472                                                  &nr_descs) < 0)) {
1473                 VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
1474                                 dev->ifname);
1475                 return -1;
1476         }
1477
1478         VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
1479                         dev->ifname, vq->last_avail_idx,
1480                         vq->last_avail_idx + nr_descs);
1481
1482         vq_inc_last_avail_packed(vq, nr_descs);
1483
1484         return 0;
1485 }
1486
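/*
 * Enqueue up to 'count' packets into the packed ring, trying the batched
 * path first and falling back to the single-packet path. Flush any
 * shadowed used entries and kick the guest if anything was enqueued.
 */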
1487 static __rte_noinline uint32_t
1488 virtio_dev_rx_packed(struct virtio_net *dev,
1489                      struct vhost_virtqueue *__rte_restrict vq,
1490                      struct rte_mbuf **__rte_restrict pkts,
1491                      uint32_t count)
1492 {
1493         uint32_t pkt_idx = 0;
1494
1495         do {
1496                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1497
1498                 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
1499                         if (!virtio_dev_rx_sync_batch_packed(dev, vq,
1500                                                         &pkts[pkt_idx])) {
1501                                 pkt_idx += PACKED_BATCH_SIZE;
1502                                 continue;
1503                         }
1504                 }
1505
1506                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1507                         break;
1508                 pkt_idx++;
1509
1510         } while (pkt_idx < count);
1511
1512         if (vq->shadow_used_idx) {
1513                 do_data_copy_enqueue(dev, vq);
1514                 vhost_flush_enqueue_shadow_packed(dev, vq);
1515         }
1516
1517         if (pkt_idx)
1518                 vhost_vring_call_packed(dev, vq);
1519
1520         return pkt_idx;
1521 }
1522
1523 static __rte_always_inline uint32_t
1524 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1525         struct rte_mbuf **pkts, uint32_t count)
1526 {
1527         struct vhost_virtqueue *vq;
1528         uint32_t nb_tx = 0;
1529
1530         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
1531         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1532                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
1533                         dev->ifname, __func__, queue_id);
1534                 return 0;
1535         }
1536
1537         vq = dev->virtqueue[queue_id];
1538
1539         rte_spinlock_lock(&vq->access_lock);
1540
1541         if (unlikely(!vq->enabled))
1542                 goto out_access_unlock;
1543
1544         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1545                 vhost_user_iotlb_rd_lock(vq);
1546
1547         if (unlikely(!vq->access_ok))
1548                 if (unlikely(vring_translate(dev, vq) < 0))
1549                         goto out;
1550
1551         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1552         if (count == 0)
1553                 goto out;
1554
1555         if (vq_is_packed(dev))
1556                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1557         else
1558                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1559
1560         vhost_queue_stats_update(dev, vq, pkts, nb_tx);
1561
1562 out:
1563         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1564                 vhost_user_iotlb_rd_unlock(vq);
1565
1566 out_access_unlock:
1567         rte_spinlock_unlock(&vq->access_lock);
1568
1569         return nb_tx;
1570 }
1571
1572 uint16_t
1573 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1574         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1575 {
1576         struct virtio_net *dev = get_device(vid);
1577
1578         if (!dev)
1579                 return 0;
1580
1581         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1582                 VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
1583                         dev->ifname, __func__);
1584                 return 0;
1585         }
1586
1587         return virtio_dev_rx(dev, queue_id, pkts, count);
1588 }
1589
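/*
 * Return the index, in the async tracking arrays, of the oldest packet
 * still in flight: pkts_idx minus the number of in-flight packets,
 * wrapping around the virtqueue size.
 */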
1590 static __rte_always_inline uint16_t
1591 async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
1592 {
1593         struct vhost_async *async = vq->async;
1594
1595         if (async->pkts_idx >= async->pkts_inflight_n)
1596                 return async->pkts_idx - async->pkts_inflight_n;
1597         else
1598                 return vq->size - async->pkts_inflight_n + async->pkts_idx;
1599 }
1600
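/*
 * Copy 'count' used elements from 's_ring' to 'd_ring', splitting the
 * copy in two when the destination index wraps around the ring size.
 */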
1601 static __rte_always_inline void
1602 store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,
1603                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1604 {
1605         size_t elem_size = sizeof(struct vring_used_elem);
1606
1607         if (d_idx + count <= ring_size) {
1608                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1609         } else {
1610                 uint16_t size = ring_size - d_idx;
1611
1612                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1613                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1614         }
1615 }
1616
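/* Packed ring counterpart of store_dma_desc_info_split(). */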
1617 static __rte_always_inline void
1618 store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
1619                 struct vring_used_elem_packed *d_ring,
1620                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1621 {
1622         size_t elem_size = sizeof(struct vring_used_elem_packed);
1623
1624         if (d_idx + count <= ring_size) {
1625                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1626         } else {
1627                 uint16_t size = ring_size - d_idx;
1628
1629                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1630                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1631         }
1632 }
1633
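/*
 * Reserve split ring descriptors for each packet, build the async copy
 * iterators, submit them to the DMA vChannel, and roll back the shadow
 * used and available indexes for packets that could not be submitted.
 * Successfully submitted packets are accounted as in-flight.
 */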
1634 static __rte_noinline uint32_t
1635 virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1636                 uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
1637                 int16_t dma_id, uint16_t vchan_id)
1638 {
1639         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1640         uint32_t pkt_idx = 0;
1641         uint16_t num_buffers;
1642         uint16_t avail_head;
1643
1644         struct vhost_async *async = vq->async;
1645         struct async_inflight_info *pkts_info = async->pkts_info;
1646         uint32_t pkt_err = 0;
1647         uint16_t n_xfer;
1648         uint16_t slot_idx = 0;
1649
1650         /*
1651          * The ordering between avail index and desc reads needs to be enforced.
1652          */
1653         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1654
1655         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1656
1657         async_iter_reset(async);
1658
1659         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1660                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1661                 uint16_t nr_vec = 0;
1662
1663                 if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
1664                                                 &num_buffers, avail_head, &nr_vec) < 0)) {
1665                         VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
1666                                         dev->ifname);
1667                         vq->shadow_used_idx -= num_buffers;
1668                         break;
1669                 }
1670
1671                 VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
1672                         dev->ifname, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
1673
1674                 if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
1675                         vq->shadow_used_idx -= num_buffers;
1676                         break;
1677                 }
1678
1679                 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
1680                 pkts_info[slot_idx].descs = num_buffers;
1681                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1682
1683                 vq->last_avail_idx += num_buffers;
1684         }
1685
1686         if (unlikely(pkt_idx == 0))
1687                 return 0;
1688
1689         n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
1690                         async->iov_iter, pkt_idx);
1691
1692         pkt_err = pkt_idx - n_xfer;
1693         if (unlikely(pkt_err)) {
1694                 uint16_t num_descs = 0;
1695
1696                 VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
1697                                 dev->ifname, __func__, pkt_err, queue_id);
1698
1699                 /* update number of completed packets */
1700                 pkt_idx = n_xfer;
1701
1702                 /* calculate the sum of descriptors to revert */
1703                 while (pkt_err-- > 0) {
1704                         num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1705                         slot_idx--;
1706                 }
1707
1708                 /* recover shadow used ring and available ring */
1709                 vq->shadow_used_idx -= num_descs;
1710                 vq->last_avail_idx -= num_descs;
1711         }
1712
1713         /* keep used descriptors */
1714         if (likely(vq->shadow_used_idx)) {
1715                 uint16_t to = async->desc_idx_split & (vq->size - 1);
1716
1717                 store_dma_desc_info_split(vq->shadow_used_split,
1718                                 async->descs_split, vq->size, 0, to,
1719                                 vq->shadow_used_idx);
1720
1721                 async->desc_idx_split += vq->shadow_used_idx;
1722
1723                 async->pkts_idx += pkt_idx;
1724                 if (async->pkts_idx >= vq->size)
1725                         async->pkts_idx -= vq->size;
1726
1727                 async->pkts_inflight_n += pkt_idx;
1728                 vq->shadow_used_idx = 0;
1729         }
1730
1731         return pkt_idx;
1732 }
1733
1734
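/*
 * Reserve enough packed ring descriptors for 'pkt', fill them from the
 * mbuf via mbuf_to_desc(), and record the consumed buffers in the shadow
 * used ring. Returns -1 when not enough descriptors are available.
 */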
1735 static __rte_always_inline int
1736 vhost_enqueue_async_packed(struct virtio_net *dev,
1737                             struct vhost_virtqueue *vq,
1738                             struct rte_mbuf *pkt,
1739                             struct buf_vector *buf_vec,
1740                             uint16_t *nr_descs,
1741                             uint16_t *nr_buffers)
1742 {
1743         uint16_t nr_vec = 0;
1744         uint16_t avail_idx = vq->last_avail_idx;
1745         uint16_t max_tries, tries = 0;
1746         uint16_t buf_id = 0;
1747         uint32_t len = 0;
1748         uint16_t desc_count = 0;
1749         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1750         uint32_t buffer_len[vq->size];
1751         uint16_t buffer_buf_id[vq->size];
1752         uint16_t buffer_desc_count[vq->size];
1753
1754         if (rxvq_is_mergeable(dev))
1755                 max_tries = vq->size - 1;
1756         else
1757                 max_tries = 1;
1758
1759         while (size > 0) {
1760                 /*
1761                  * If we have tried all available ring items and still
1762                  * cannot get enough buffers, something abnormal has
1763                  * happened.
1764                  */
1765                 if (unlikely(++tries > max_tries))
1766                         return -1;
1767
1768                 if (unlikely(fill_vec_buf_packed(dev, vq,
1769                                                 avail_idx, &desc_count,
1770                                                 buf_vec, &nr_vec,
1771                                                 &buf_id, &len,
1772                                                 VHOST_ACCESS_RW) < 0))
1773                         return -1;
1774
1775                 len = RTE_MIN(len, size);
1776                 size -= len;
1777
1778                 buffer_len[*nr_buffers] = len;
1779                 buffer_buf_id[*nr_buffers] = buf_id;
1780                 buffer_desc_count[*nr_buffers] = desc_count;
1781                 *nr_buffers += 1;
1782                 *nr_descs += desc_count;
1783                 avail_idx += desc_count;
1784                 if (avail_idx >= vq->size)
1785                         avail_idx -= vq->size;
1786         }
1787
1788         if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
1789                 return -1;
1790
1791         vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
1792
1793         return 0;
1794 }
1795
1796 static __rte_always_inline int16_t
1797 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1798                             struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
1799 {
1800         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1801
1802         if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
1803                                         nr_descs, nr_buffers) < 0)) {
1804                 VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n", dev->ifname);
1805                 return -1;
1806         }
1807
1808         VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
1809                         dev->ifname, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
1810
1811         return 0;
1812 }
1813
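/*
 * Roll back the enqueue state for packets whose copies could not be
 * submitted to the DMA vChannel: rewind the completed packet count, the
 * shadow used index and the last available index, toggling the avail
 * wrap counter when the index wraps.
 */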
1814 static __rte_always_inline void
1815 dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
1816                         uint32_t nr_err, uint32_t *pkt_idx)
1817 {
1818         uint16_t descs_err = 0;
1819         uint16_t buffers_err = 0;
1820         struct async_inflight_info *pkts_info = vq->async->pkts_info;
1821
1822         *pkt_idx -= nr_err;
1823         /* calculate the sum of buffers and descs of DMA-error packets. */
1824         while (nr_err-- > 0) {
1825                 descs_err += pkts_info[slot_idx % vq->size].descs;
1826                 buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
1827                 slot_idx--;
1828         }
1829
1830         if (vq->last_avail_idx >= descs_err) {
1831                 vq->last_avail_idx -= descs_err;
1832         } else {
1833                 vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
1834                 vq->avail_wrap_counter ^= 1;
1835         }
1836
1837         vq->shadow_used_idx -= buffers_err;
1838 }
1839
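/* Packed ring counterpart of virtio_dev_rx_async_submit_split(). */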
1840 static __rte_noinline uint32_t
1841 virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1842                 uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
1843                 int16_t dma_id, uint16_t vchan_id)
1844 {
1845         uint32_t pkt_idx = 0;
1846         uint32_t remained = count;
1847         uint16_t n_xfer;
1848         uint16_t num_buffers;
1849         uint16_t num_descs;
1850
1851         struct vhost_async *async = vq->async;
1852         struct async_inflight_info *pkts_info = async->pkts_info;
1853         uint32_t pkt_err = 0;
1854         uint16_t slot_idx = 0;
1855
1856         do {
1857                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1858
1859                 num_buffers = 0;
1860                 num_descs = 0;
1861                 if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
1862                                                 &num_descs, &num_buffers) < 0))
1863                         break;
1864
1865                 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
1866
1867                 pkts_info[slot_idx].descs = num_descs;
1868                 pkts_info[slot_idx].nr_buffers = num_buffers;
1869                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1870
1871                 pkt_idx++;
1872                 remained--;
1873                 vq_inc_last_avail_packed(vq, num_descs);
1874         } while (pkt_idx < count);
1875
1876         if (unlikely(pkt_idx == 0))
1877                 return 0;
1878
1879         n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
1880                         async->iov_iter, pkt_idx);
1881
1882         async_iter_reset(async);
1883
1884         pkt_err = pkt_idx - n_xfer;
1885         if (unlikely(pkt_err)) {
1886                 VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
1887                                 dev->ifname, __func__, pkt_err, queue_id);
1888                 dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
1889         }
1890
1891         if (likely(vq->shadow_used_idx)) {
1892                 /* keep used descriptors. */
1893                 store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
1894                                         vq->size, 0, async->buffer_idx_packed,
1895                                         vq->shadow_used_idx);
1896
1897                 async->buffer_idx_packed += vq->shadow_used_idx;
1898                 if (async->buffer_idx_packed >= vq->size)
1899                         async->buffer_idx_packed -= vq->size;
1900
1901                 async->pkts_idx += pkt_idx;
1902                 if (async->pkts_idx >= vq->size)
1903                         async->pkts_idx -= vq->size;
1904
1905                 vq->shadow_used_idx = 0;
1906                 async->pkts_inflight_n += pkt_idx;
1907         }
1908
1909         return pkt_idx;
1910 }
1911
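/*
 * Copy 'n_descs' completed used elements from the async descriptor ring
 * into the split used ring, handling wrap-around on both rings. The
 * caller is responsible for updating used->idx afterwards.
 */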
1912 static __rte_always_inline void
1913 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
1914 {
1915         struct vhost_async *async = vq->async;
1916         uint16_t nr_left = n_descs;
1917         uint16_t nr_copy;
1918         uint16_t to, from;
1919
1920         do {
1921                 from = async->last_desc_idx_split & (vq->size - 1);
1922                 nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
1923                 to = vq->last_used_idx & (vq->size - 1);
1924
1925                 if (to + nr_copy <= vq->size) {
1926                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1927                                         nr_copy * sizeof(struct vring_used_elem));
1928                 } else {
1929                         uint16_t size = vq->size - to;
1930
1931                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1932                                         size * sizeof(struct vring_used_elem));
1933                         rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
1934                                         (nr_copy - size) * sizeof(struct vring_used_elem));
1935                 }
1936
1937                 async->last_desc_idx_split += nr_copy;
1938                 vq->last_used_idx += nr_copy;
1939                 nr_left -= nr_copy;
1940         } while (nr_left > 0);
1941 }
1942
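/*
 * Write back 'n_buffers' completed buffers to the packed ring: fill in
 * ids and lengths first, then, after a release fence, set the descriptor
 * flags, writing the head descriptor's flags last so the guest never
 * observes a partially updated chain.
 */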
1943 static __rte_always_inline void
1944 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
1945                                 uint16_t n_buffers)
1946 {
1947         struct vhost_async *async = vq->async;
1948         uint16_t from = async->last_buffer_idx_packed;
1949         uint16_t used_idx = vq->last_used_idx;
1950         uint16_t head_idx = vq->last_used_idx;
1951         uint16_t head_flags = 0;
1952         uint16_t i;
1953
1954         /* Split loop in two to save memory barriers */
1955         for (i = 0; i < n_buffers; i++) {
1956                 vq->desc_packed[used_idx].id = async->buffers_packed[from].id;
1957                 vq->desc_packed[used_idx].len = async->buffers_packed[from].len;
1958
1959                 used_idx += async->buffers_packed[from].count;
1960                 if (used_idx >= vq->size)
1961                         used_idx -= vq->size;
1962
1963                 from++;
1964                 if (from >= vq->size)
1965                         from = 0;
1966         }
1967
1968         /* The ordering for storing desc flags needs to be enforced. */
1969         rte_atomic_thread_fence(__ATOMIC_RELEASE);
1970
1971         from = async->last_buffer_idx_packed;
1972
1973         for (i = 0; i < n_buffers; i++) {
1974                 uint16_t flags;
1975
1976                 if (async->buffers_packed[from].len)
1977                         flags = VRING_DESC_F_WRITE;
1978                 else
1979                         flags = 0;
1980
1981                 if (vq->used_wrap_counter) {
1982                         flags |= VRING_DESC_F_USED;
1983                         flags |= VRING_DESC_F_AVAIL;
1984                 } else {
1985                         flags &= ~VRING_DESC_F_USED;
1986                         flags &= ~VRING_DESC_F_AVAIL;
1987                 }
1988
1989                 if (i > 0) {
1990                         vq->desc_packed[vq->last_used_idx].flags = flags;
1991                 } else {
1992                         head_idx = vq->last_used_idx;
1993                         head_flags = flags;
1994                 }
1995
1996                 vq_inc_last_used_packed(vq, async->buffers_packed[from].count);
1997
1998                 from++;
1999                 if (from == vq->size)
2000                         from = 0;
2001         }
2002
2003         vq->desc_packed[head_idx].flags = head_flags;
2004         async->last_buffer_idx_packed = from;
2005 }
2006
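/*
 * Poll the DMA vChannel for completed copies, gather the mbufs of fully
 * completed packets, write back their used descriptors when the
 * virtqueue is enabled and accessible, and return the number of
 * completed packets. Callers must guarantee exclusive virtqueue access.
 */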
2007 static __rte_always_inline uint16_t
2008 vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
2009                 struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
2010                 uint16_t vchan_id)
2011 {
2012         struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
2013         struct vhost_async *async = vq->async;
2014         struct async_inflight_info *pkts_info = async->pkts_info;
2015         uint16_t nr_cpl_pkts = 0;
2016         uint16_t n_descs = 0, n_buffers = 0;
2017         uint16_t start_idx, from, i;
2018
2019         /* Check completed copies for the given DMA vChannel */
2020         vhost_async_dma_check_completed(dev, dma_id, vchan_id, VHOST_DMA_MAX_COPY_COMPLETE);
2021
2022         start_idx = async_get_first_inflight_pkt_idx(vq);
2023         /**
2024          * Calculate the number of copy completed packets.
2025          * Note that there may be completed packets even if
2026          * no copies are reported done by the given DMA vChannel,
2027          * as it's possible that a virtqueue uses multiple DMA
2028          * vChannels.
2029          */
2030         from = start_idx;
2031         while (vq->async->pkts_cmpl_flag[from] && count--) {
2032                 vq->async->pkts_cmpl_flag[from] = false;
2033                 from++;
2034                 if (from >= vq->size)
2035                         from -= vq->size;
2036                 nr_cpl_pkts++;
2037         }
2038
2039         if (nr_cpl_pkts == 0)
2040                 return 0;
2041
2042         for (i = 0; i < nr_cpl_pkts; i++) {
2043                 from = (start_idx + i) % vq->size;
2044                 /* Only used with packed ring */
2045                 n_buffers += pkts_info[from].nr_buffers;
2046                 /* Only used with split ring */
2047                 n_descs += pkts_info[from].descs;
2048                 pkts[i] = pkts_info[from].mbuf;
2049         }
2050
2051         async->pkts_inflight_n -= nr_cpl_pkts;
2052
2053         if (likely(vq->enabled && vq->access_ok)) {
2054                 if (vq_is_packed(dev)) {
2055                         write_back_completed_descs_packed(vq, n_buffers);
2056                         vhost_vring_call_packed(dev, vq);
2057                 } else {
2058                         write_back_completed_descs_split(vq, n_descs);
2059                         __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
2060                         vhost_vring_call_split(dev, vq);
2061                 }
2062         } else {
2063                 if (vq_is_packed(dev)) {
2064                         async->last_buffer_idx_packed += n_buffers;
2065                         if (async->last_buffer_idx_packed >= vq->size)
2066                                 async->last_buffer_idx_packed -= vq->size;
2067                 } else {
2068                         async->last_desc_idx_split += n_descs;
2069                 }
2070         }
2071
2072         return nr_cpl_pkts;
2073 }
2074
2075 uint16_t
2076 rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
2077                 struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
2078                 uint16_t vchan_id)
2079 {
2080         struct virtio_net *dev = get_device(vid);
2081         struct vhost_virtqueue *vq;
2082         uint16_t n_pkts_cpl = 0;
2083
2084         if (unlikely(!dev))
2085                 return 0;
2086
2087         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
2088         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2089                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
2090                         dev->ifname, __func__, queue_id);
2091                 return 0;
2092         }
2093
2094         if (unlikely(!dma_copy_track[dma_id].vchans ||
2095                                 !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
2096                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
2097                                dma_id, vchan_id);
2098                 return 0;
2099         }
2100
2101         vq = dev->virtqueue[queue_id];
2102
2103         if (!rte_spinlock_trylock(&vq->access_lock)) {
2104                 VHOST_LOG_DATA(DEBUG, "(%s) %s: virtqueue %u is busy.\n", dev->ifname, __func__,
2105                                 queue_id);
2106                 return 0;
2107         }
2108
2109         if (unlikely(!vq->async)) {
2110                 VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for virtqueue %d.\n",
2111                                 dev->ifname, __func__, queue_id);
2112                 goto out;
2113         }
2114
2115         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
2116
2117         vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
2118         vq->stats.inflight_completed += n_pkts_cpl;
2119
2120 out:
2121         rte_spinlock_unlock(&vq->access_lock);
2122
2123         return n_pkts_cpl;
2124 }
2125
2126 uint16_t
2127 rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
2128                 struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
2129                 uint16_t vchan_id)
2130 {
2131         struct virtio_net *dev = get_device(vid);
2132         struct vhost_virtqueue *vq;
2133         uint16_t n_pkts_cpl = 0;
2134
2135         if (!dev)
2136                 return 0;
2137
2138         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
2139         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2140                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
2141                         dev->ifname, __func__, queue_id);
2142                 return 0;
2143         }
2144
2145         vq = dev->virtqueue[queue_id];
2146
2147         if (unlikely(!vq->async)) {
2148                 VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
2149                         dev->ifname, __func__, queue_id);
2150                 return 0;
2151         }
2152
2153         if (unlikely(!dma_copy_track[dma_id].vchans ||
2154                                 !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
2155                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
2156                                 dma_id, vchan_id);
2157                 return 0;
2158         }
2159
2160         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
2161
2162         vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
2163         vq->stats.inflight_completed += n_pkts_cpl;
2164
2165         return n_pkts_cpl;
2166 }
2167
2168 static __rte_always_inline uint32_t
2169 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
2170         struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
2171 {
2172         struct vhost_virtqueue *vq;
2173         uint32_t nb_tx = 0;
2174
2175         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
2176         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2177                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
2178                         dev->ifname, __func__, queue_id);
2179                 return 0;
2180         }
2181
2182         if (unlikely(!dma_copy_track[dma_id].vchans ||
2183                                 !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
2184                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
2185                                dma_id, vchan_id);
2186                 return 0;
2187         }
2188
2189         vq = dev->virtqueue[queue_id];
2190
2191         rte_spinlock_lock(&vq->access_lock);
2192
2193         if (unlikely(!vq->enabled || !vq->async))
2194                 goto out_access_unlock;
2195
2196         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2197                 vhost_user_iotlb_rd_lock(vq);
2198
2199         if (unlikely(!vq->access_ok))
2200                 if (unlikely(vring_translate(dev, vq) < 0))
2201                         goto out;
2202
2203         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
2204         if (count == 0)
2205                 goto out;
2206
2207         if (vq_is_packed(dev))
2208                 nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
2209                                 pkts, count, dma_id, vchan_id);
2210         else
2211                 nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
2212                                 pkts, count, dma_id, vchan_id);
2213
2214         vq->stats.inflight_submitted += nb_tx;
2215
2216 out:
2217         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2218                 vhost_user_iotlb_rd_unlock(vq);
2219
2220 out_access_unlock:
2221         rte_spinlock_unlock(&vq->access_lock);
2222
2223         return nb_tx;
2224 }
2225
2226 uint16_t
2227 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
2228                 struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
2229                 uint16_t vchan_id)
2230 {
2231         struct virtio_net *dev = get_device(vid);
2232
2233         if (!dev)
2234                 return 0;
2235
2236         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2237                 VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
2238                         dev->ifname, __func__);
2239                 return 0;
2240         }
2241
2242         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, dma_id, vchan_id);
2243 }
2244
2245 static inline bool
2246 virtio_net_with_host_offload(struct virtio_net *dev)
2247 {
2248         if (dev->features &
2249                         ((1ULL << VIRTIO_NET_F_CSUM) |
2250                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
2251                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2252                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
2253                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
2254                 return true;
2255
2256         return false;
2257 }
2258
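/*
 * Parse the Ethernet (and optional VLAN), IPv4/IPv6 and L4 headers of
 * 'm', checking that each header fits in the first segment. On success,
 * set m->l2_len, m->l3_len, the Tx IPv4/IPv6 offload flags and
 * *l4_proto; on failure, reset the offload fields and return -EINVAL.
 */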
2259 static int
2260 parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
2261 {
2262         struct rte_ipv4_hdr *ipv4_hdr;
2263         struct rte_ipv6_hdr *ipv6_hdr;
2264         struct rte_ether_hdr *eth_hdr;
2265         uint16_t ethertype;
2266         uint16_t data_len = rte_pktmbuf_data_len(m);
2267
2268         if (data_len < sizeof(struct rte_ether_hdr))
2269                 return -EINVAL;
2270
2271         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
2272
2273         m->l2_len = sizeof(struct rte_ether_hdr);
2274         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
2275
2276         if (ethertype == RTE_ETHER_TYPE_VLAN) {
2277                 if (data_len < sizeof(struct rte_ether_hdr) +
2278                                 sizeof(struct rte_vlan_hdr))
2279                         goto error;
2280
2281                 struct rte_vlan_hdr *vlan_hdr =
2282                         (struct rte_vlan_hdr *)(eth_hdr + 1);
2283
2284                 m->l2_len += sizeof(struct rte_vlan_hdr);
2285                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
2286         }
2287
2288         switch (ethertype) {
2289         case RTE_ETHER_TYPE_IPV4:
2290                 if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
2291                         goto error;
2292                 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2293                                 m->l2_len);
2294                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
2295                 if (data_len < m->l2_len + m->l3_len)
2296                         goto error;
2297                 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
2298                 *l4_proto = ipv4_hdr->next_proto_id;
2299                 break;
2300         case RTE_ETHER_TYPE_IPV6:
2301                 if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
2302                         goto error;
2303                 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
2304                                 m->l2_len);
2305                 m->l3_len = sizeof(struct rte_ipv6_hdr);
2306                 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
2307                 *l4_proto = ipv6_hdr->proto;
2308                 break;
2309         default:
2310                 /* a valid L3 header is needed for further L4 parsing */
2311                 goto error;
2312         }
2313
2314         /* both CSUM and GSO need a valid L4 header */
2315         switch (*l4_proto) {
2316         case IPPROTO_TCP:
2317                 if (data_len < m->l2_len + m->l3_len +
2318                                 sizeof(struct rte_tcp_hdr))
2319                         goto error;
2320                 break;
2321         case IPPROTO_UDP:
2322                 if (data_len < m->l2_len + m->l3_len +
2323                                 sizeof(struct rte_udp_hdr))
2324                         goto error;
2325                 break;
2326         case IPPROTO_SCTP:
2327                 if (data_len < m->l2_len + m->l3_len +
2328                                 sizeof(struct rte_sctp_hdr))
2329                         goto error;
2330                 break;
2331         default:
2332                 goto error;
2333         }
2334
2335         return 0;
2336
2337 error:
2338         m->l2_len = 0;
2339         m->l3_len = 0;
2340         m->ol_flags = 0;
2341         return -EINVAL;
2342 }
2343
2344 static __rte_always_inline void
2345 vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
2346                 struct rte_mbuf *m)
2347 {
2348         uint8_t l4_proto = 0;
2349         struct rte_tcp_hdr *tcp_hdr = NULL;
2350         uint16_t tcp_len;
2351         uint16_t data_len = rte_pktmbuf_data_len(m);
2352
2353         if (parse_headers(m, &l4_proto) < 0)
2354                 return;
2355
2356         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2357                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
2358                         switch (hdr->csum_offset) {
2359                         case (offsetof(struct rte_tcp_hdr, cksum)):
2360                                 if (l4_proto != IPPROTO_TCP)
2361                                         goto error;
2362                                 m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
2363                                 break;
2364                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
2365                                 if (l4_proto != IPPROTO_UDP)
2366                                         goto error;
2367                                 m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
2368                                 break;
2369                         case (offsetof(struct rte_sctp_hdr, cksum)):
2370                                 if (l4_proto != IPPROTO_SCTP)
2371                                         goto error;
2372                                 m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
2373                                 break;
2374                         default:
2375                                 goto error;
2376                         }
2377                 } else {
2378                         goto error;
2379                 }
2380         }
2381
2382         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2383                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2384                 case VIRTIO_NET_HDR_GSO_TCPV4:
2385                 case VIRTIO_NET_HDR_GSO_TCPV6:
2386                         if (l4_proto != IPPROTO_TCP)
2387                                 goto error;
2388                         tcp_hdr = rte_pktmbuf_mtod_offset(m,
2389                                         struct rte_tcp_hdr *,
2390                                         m->l2_len + m->l3_len);
2391                         tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
2392                         if (data_len < m->l2_len + m->l3_len + tcp_len)
2393                                 goto error;
2394                         m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
2395                         m->tso_segsz = hdr->gso_size;
2396                         m->l4_len = tcp_len;
2397                         break;
2398                 case VIRTIO_NET_HDR_GSO_UDP:
2399                         if (l4_proto != IPPROTO_UDP)
2400                                 goto error;
2401                         m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
2402                         m->tso_segsz = hdr->gso_size;
2403                         m->l4_len = sizeof(struct rte_udp_hdr);
2404                         break;
2405                 default:
2406                         VHOST_LOG_DATA(WARNING, "(%s) unsupported gso type %u.\n",
2407                                         dev->ifname, hdr->gso_type);
2408                         goto error;
2409                 }
2410         }
2411         return;
2412
2413 error:
2414         m->l2_len = 0;
2415         m->l3_len = 0;
2416         m->ol_flags = 0;
2417 }
2418
2419 static __rte_always_inline void
2420 vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
2421                 struct rte_mbuf *m, bool legacy_ol_flags)
2422 {
2423         struct rte_net_hdr_lens hdr_lens;
2424         int l4_supported = 0;
2425         uint32_t ptype;
2426
2427         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
2428                 return;
2429
2430         if (legacy_ol_flags) {
2431                 vhost_dequeue_offload_legacy(dev, hdr, m);
2432                 return;
2433         }
2434
2435         m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
2436
2437         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
2438         m->packet_type = ptype;
2439         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
2440             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
2441             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
2442                 l4_supported = 1;
2443
2444         /* According to the Virtio 1.1 spec, the device only needs to look at
2445          * VIRTIO_NET_HDR_F_NEEDS_CSUM in the packet transmission path.
2446          * This differs from the incoming packet processing path, where the
2447          * driver can rely on the VIRTIO_NET_HDR_F_DATA_VALID flag set by the
2448          * device.
2449          *
2450          * 5.1.6.2.1 Driver Requirements: Packet Transmission
2451          * The driver MUST NOT set the VIRTIO_NET_HDR_F_DATA_VALID and
2452          * VIRTIO_NET_HDR_F_RSC_INFO bits in flags.
2453          *
2454          * 5.1.6.2.2 Device Requirements: Packet Transmission
2455          * The device MUST ignore flag bits that it does not recognize.
2456          */
2457         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2458                 uint32_t hdrlen;
2459
2460                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
2461                 if (hdr->csum_start <= hdrlen && l4_supported != 0) {
2462                         m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
2463                 } else {
2464                         /* Unknown proto or tunnel, do sw cksum. We can assume
2465                          * the cksum field is in the first segment since the
2466                          * buffers we provided to the host are large enough.
2467                          * In case of SCTP, this will be wrong since it's a CRC
2468                          * but there's nothing we can do.
2469                          */
2470                         uint16_t csum = 0, off;
2471
2472                         if (rte_raw_cksum_mbuf(m, hdr->csum_start,
2473                                         rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0)
2474                                 return;
2475                         if (likely(csum != 0xffff))
2476                                 csum = ~csum;
2477                         off = hdr->csum_offset + hdr->csum_start;
2478                         if (rte_pktmbuf_data_len(m) >= off + 1)
2479                                 *rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
2480                 }
2481         }
2482
2483         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2484                 if (hdr->gso_size == 0)
2485                         return;
2486
2487                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2488                 case VIRTIO_NET_HDR_GSO_TCPV4:
2489                 case VIRTIO_NET_HDR_GSO_TCPV6:
2490                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
2491                                 break;
2492                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2493                         m->tso_segsz = hdr->gso_size;
2494                         break;
2495                 case VIRTIO_NET_HDR_GSO_UDP:
2496                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
2497                                 break;
2498                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2499                         m->tso_segsz = hdr->gso_size;
2500                         break;
2501                 default:
2502                         break;
2503                 }
2504         }
2505 }
2506
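/*
 * The virtio-net header may be split across several descriptor buffers;
 * gather it piece by piece into a contiguous local copy.
 */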
2507 static __rte_noinline void
2508 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
2509                 struct buf_vector *buf_vec)
2510 {
2511         uint64_t len;
2512         uint64_t remain = sizeof(struct virtio_net_hdr);
2513         uint64_t src;
2514         uint64_t dst = (uint64_t)(uintptr_t)hdr;
2515
2516         while (remain) {
2517                 len = RTE_MIN(remain, buf_vec->buf_len);
2518                 src = buf_vec->buf_addr;
2519                 rte_memcpy((void *)(uintptr_t)dst,
2520                                 (void *)(uintptr_t)src, len);
2521
2522                 remain -= len;
2523                 dst += len;
2524                 buf_vec++;
2525         }
2526 }
2527
2528 static __rte_always_inline int
2529 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
2530                   struct buf_vector *buf_vec, uint16_t nr_vec,
2531                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
2532                   bool legacy_ol_flags)
2533 {
2534         uint32_t buf_avail, buf_offset;
2535         uint64_t buf_addr, buf_len;
2536         uint32_t mbuf_avail, mbuf_offset;
2537         uint32_t cpy_len;
2538         struct rte_mbuf *cur = m, *prev = m;
2539         struct virtio_net_hdr tmp_hdr;
2540         struct virtio_net_hdr *hdr = NULL;
2541         /* Counter guarding against an endless descriptor chain loop */
2542         uint16_t vec_idx = 0;
2543         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
2544         int error = 0;
2545
2546         buf_addr = buf_vec[vec_idx].buf_addr;
2547         buf_len = buf_vec[vec_idx].buf_len;
2548
2549         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
2550                 error = -1;
2551                 goto out;
2552         }
2553
2554         if (virtio_net_with_host_offload(dev)) {
2555                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
2556                         /*
2557                          * No luck, the virtio-net header doesn't fit
2558                          * in a contiguous virtual area.
2559                          */
2560                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
2561                         hdr = &tmp_hdr;
2562                 } else {
2563                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
2564                 }
2565         }
2566
2567         /*
2568          * A virtio driver normally uses at least 2 desc buffers
2569          * for Tx: the first for storing the header, and the others
2570          * for storing the data.
2571          */
2572         if (unlikely(buf_len < dev->vhost_hlen)) {
2573                 buf_offset = dev->vhost_hlen - buf_len;
2574                 vec_idx++;
2575                 buf_addr = buf_vec[vec_idx].buf_addr;
2576                 buf_len = buf_vec[vec_idx].buf_len;
2577                 buf_avail  = buf_len - buf_offset;
2578         } else if (buf_len == dev->vhost_hlen) {
2579                 if (unlikely(++vec_idx >= nr_vec))
2580                         goto out;
2581                 buf_addr = buf_vec[vec_idx].buf_addr;
2582                 buf_len = buf_vec[vec_idx].buf_len;
2583
2584                 buf_offset = 0;
2585                 buf_avail = buf_len;
2586         } else {
2587                 buf_offset = dev->vhost_hlen;
2588                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
2589         }
2590
2591         PRINT_PACKET(dev,
2592                         (uintptr_t)(buf_addr + buf_offset),
2593                         (uint32_t)buf_avail, 0);
2594
2595         mbuf_offset = 0;
2596         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
2597         while (1) {
2598                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
2599
2600                 if (likely(cpy_len > MAX_BATCH_LEN ||
2601                                         vq->batch_copy_nb_elems >= vq->size ||
2602                                         (hdr && cur == m))) {
2603                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2604                                                 mbuf_offset),
2605                                         (void *)((uintptr_t)(buf_addr +
2606                                                         buf_offset)), cpy_len);
2607                 } else {
2608                         batch_copy[vq->batch_copy_nb_elems].dst =
2609                                 rte_pktmbuf_mtod_offset(cur, void *,
2610                                                 mbuf_offset);
2611                         batch_copy[vq->batch_copy_nb_elems].src =
2612                                 (void *)((uintptr_t)(buf_addr + buf_offset));
2613                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
2614                         vq->batch_copy_nb_elems++;
2615                 }
2616
2617                 mbuf_avail  -= cpy_len;
2618                 mbuf_offset += cpy_len;
2619                 buf_avail -= cpy_len;
2620                 buf_offset += cpy_len;
2621
2622                 /* This buf has reached its end, get the next one */
2623                 if (buf_avail == 0) {
2624                         if (++vec_idx >= nr_vec)
2625                                 break;
2626
2627                         buf_addr = buf_vec[vec_idx].buf_addr;
2628                         buf_len = buf_vec[vec_idx].buf_len;
2629
2630                         buf_offset = 0;
2631                         buf_avail  = buf_len;
2632
2633                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2634                                         (uint32_t)buf_avail, 0);
2635                 }
2636
2637                 /*
2638                  * This mbuf has reached its end, get a new one
2639                  * to hold more data.
2640                  */
2641                 if (mbuf_avail == 0) {
2642                         cur = rte_pktmbuf_alloc(mbuf_pool);
2643                         if (unlikely(cur == NULL)) {
2644                                 VHOST_LOG_DATA(ERR, "(%s) failed to allocate memory for mbuf.\n",
2645                                                 dev->ifname);
2646                                 error = -1;
2647                                 goto out;
2648                         }
2649
2650                         prev->next = cur;
2651                         prev->data_len = mbuf_offset;
2652                         m->nb_segs += 1;
2653                         m->pkt_len += mbuf_offset;
2654                         prev = cur;
2655
2656                         mbuf_offset = 0;
2657                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2658                 }
2659         }
2660
2661         prev->data_len = mbuf_offset;
2662         m->pkt_len    += mbuf_offset;
2663
2664         if (hdr)
2665                 vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
2666
2667 out:
2668
2669         return error;
2670 }
2671
2672 static void
2673 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2674 {
2675         rte_free(opaque);
2676 }
2677
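/*
 * Allocate an external buffer large enough for the headroom, the packet
 * data and the shared info, attach it to 'pkt' and reset the headroom.
 * Returns a negative value on failure.
 */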
2678 static int
2679 virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t size)
2680 {
2681         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2682         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2683         uint16_t buf_len;
2684         rte_iova_t iova;
2685         void *buf;
2686
2687         total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2688         total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2689
2690         if (unlikely(total_len > UINT16_MAX))
2691                 return -ENOSPC;
2692
2693         buf_len = total_len;
2694         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2695         if (unlikely(buf == NULL))
2696                 return -ENOMEM;
2697
2698         /* Initialize shinfo */
2699         shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2700                                                 virtio_dev_extbuf_free, buf);
2701         if (unlikely(shinfo == NULL)) {
2702                 rte_free(buf);
2703                 VHOST_LOG_DATA(ERR, "(%s) failed to init shinfo\n", dev->ifname);
2704                 return -1;
2705         }
2706
2707         iova = rte_malloc_virt2iova(buf);
2708         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2709         rte_pktmbuf_reset_headroom(pkt);
2710
2711         return 0;
2712 }
2713
2714 /*
2715  * Prepare a pktmbuf that can hold data_len bytes: reuse its tailroom, attach an external buffer, or allow mbuf chaining.
2716  */
2717 static __rte_always_inline int
2718 virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
2719                          uint32_t data_len)
2720 {
2721         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2722                 return 0;
2723
2724         /* attach an external buffer if supported */
2725         if (dev->extbuf && !virtio_dev_extbuf_alloc(dev, pkt, data_len))
2726                 return 0;
2727
2728         /* check if chained buffers are allowed */
2729         if (!dev->linearbuf)
2730                 return 0;
2731
2732         return -1;
2733 }
2734
2735 __rte_always_inline
2736 static uint16_t
2737 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2738         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
2739         bool legacy_ol_flags)
2740 {
2741         uint16_t i;
2742         uint16_t free_entries;
2743         uint16_t dropped = 0;
2744         static bool allocerr_warned;
2745
2746         /*
2747          * The ordering between avail index and
2748          * desc reads needs to be enforced.
2749          */
2750         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2751                         vq->last_avail_idx;
2752         if (free_entries == 0)
2753                 return 0;
2754
2755         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2756
2757         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
2758
2759         count = RTE_MIN(count, MAX_PKT_BURST);
2760         count = RTE_MIN(count, free_entries);
2761         VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
2762                         dev->ifname, count);
2763
2764         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2765                 return 0;
2766
2767         for (i = 0; i < count; i++) {
2768                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2769                 uint16_t head_idx;
2770                 uint32_t buf_len;
2771                 uint16_t nr_vec = 0;
2772                 int err;
2773
2774                 if (unlikely(fill_vec_buf_split(dev, vq,
2775                                                 vq->last_avail_idx + i,
2776                                                 &nr_vec, buf_vec,
2777                                                 &head_idx, &buf_len,
2778                                                 VHOST_ACCESS_RO) < 0))
2779                         break;
2780
2781                 update_shadow_used_ring_split(vq, head_idx, 0);
2782
2783                 err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len);
2784                 if (unlikely(err)) {
2785                         /*
2786                          * mbuf preparation fails for jumbo packets when external
2787                          * buffer allocation is not allowed and a linear buffer
2788                          * is required. Drop this packet.
2789                          */
2790                         if (!allocerr_warned) {
2791                                 VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
2792                                         dev->ifname, buf_len, mbuf_pool->name);
2793                                 allocerr_warned = true;
2794                         }
2795                         dropped += 1;
2796                         i++;
2797                         break;
2798                 }
2799
2800                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2801                                 mbuf_pool, legacy_ol_flags);
2802                 if (unlikely(err)) {
2803                         if (!allocerr_warned) {
2804                                 VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
2805                                         dev->ifname);
2806                                 allocerr_warned = true;
2807                         }
2808                         dropped += 1;
2809                         i++;
2810                         break;
2811                 }
2812         }
2813
2814         if (dropped)
2815                 rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
2816
2817         vq->last_avail_idx += i;
2818
2819         do_data_copy_dequeue(vq);
2820         if (unlikely(i < count))
2821                 vq->shadow_used_idx = i;
2822         if (likely(vq->shadow_used_idx)) {
2823                 flush_shadow_used_ring_split(dev, vq);
2824                 vhost_vring_call_split(dev, vq);
2825         }
2826
2827         return (i - dropped);
2828 }
2829
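/*
 * The __rte_noinline wrappers below let the compiler fold the constant
 * legacy_ol_flags argument into the __rte_always_inline body above,
 * producing one specialized copy that fills the legacy mbuf Rx offload
 * flags and one that fills the compliant ones, without a per-packet
 * branch on the flavour.
 */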
2830 __rte_noinline
2831 static uint16_t
2832 virtio_dev_tx_split_legacy(struct virtio_net *dev,
2833         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2834         struct rte_mbuf **pkts, uint16_t count)
2835 {
2836         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
2837 }
2838
2839 __rte_noinline
2840 static uint16_t
2841 virtio_dev_tx_split_compliant(struct virtio_net *dev,
2842         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2843         struct rte_mbuf **pkts, uint16_t count)
2844 {
2845         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
2846 }
2847
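/*
 * Check whether the next PACKED_BATCH_SIZE descriptors can be dequeued as
 * one batch: the avail index must be batch-aligned and the batch must not
 * cross the end of the ring, every descriptor must be available (matching
 * avail/used wrap bits) and not flagged for the single-dequeue path, every
 * buffer must translate to a host VA of the advertised length, and every
 * mbuf must be able to hold the payload that follows the virtio-net
 * header. On success the translated addresses and descriptor ids are
 * returned and the mbuf lengths are set.
 */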
2848 static __rte_always_inline int
2849 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2850                                  struct vhost_virtqueue *vq,
2851                                  struct rte_mbuf **pkts,
2852                                  uint16_t avail_idx,
2853                                  uintptr_t *desc_addrs,
2854                                  uint16_t *ids)
2855 {
2856         bool wrap = vq->avail_wrap_counter;
2857         struct vring_packed_desc *descs = vq->desc_packed;
2858         uint64_t lens[PACKED_BATCH_SIZE];
2859         uint64_t buf_lens[PACKED_BATCH_SIZE];
2860         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2861         uint16_t flags, i;
2862
2863         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2864                 return -1;
2865         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2866                 return -1;
2867
2868         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2869                 flags = descs[avail_idx + i].flags;
2870                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2871                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2872                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2873                         return -1;
2874         }
2875
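        /*
         * The descriptor flags were checked above without a barrier; this
         * acquire fence keeps the length and address reads below from being
         * reordered before the availability check.
         */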
2876         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2877
2878         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2879                 lens[i] = descs[avail_idx + i].len;
2880
2881         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2882                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2883                                                   descs[avail_idx + i].addr,
2884                                                   &lens[i], VHOST_ACCESS_RW);
2885         }
2886
2887         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2888                 if (unlikely(!desc_addrs[i]))
2889                         return -1;
2890                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2891                         return -1;
2892         }
2893
2894         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2895                 if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
2896                         goto err;
2897         }
2898
2899         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2900                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2901
2902         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2903                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2904                         goto err;
2905         }
2906
2907         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2908                 pkts[i]->pkt_len = lens[i] - buf_offset;
2909                 pkts[i]->data_len = pkts[i]->pkt_len;
2910                 ids[i] = descs[avail_idx + i].id;
2911         }
2912
2913         return 0;
2914
2915 err:
2916         return -1;
2917 }
2918
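/*
 * Dequeue a full batch of PACKED_BATCH_SIZE packets: reserve and validate
 * the batch, copy each guest buffer (past the virtio-net header) into its
 * mbuf, apply host offload metadata when negotiated, record the
 * completions in the shadow used ring (only the last descriptor id in
 * in-order mode), and advance last_avail_idx by the batch size.
 */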
2919 static __rte_always_inline int
2920 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2921                            struct vhost_virtqueue *vq,
2922                            struct rte_mbuf **pkts,
2923                            bool legacy_ol_flags)
2924 {
2925         uint16_t avail_idx = vq->last_avail_idx;
2926         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2927         struct virtio_net_hdr *hdr;
2928         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2929         uint16_t ids[PACKED_BATCH_SIZE];
2930         uint16_t i;
2931
2932         if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
2933                                              desc_addrs, ids))
2934                 return -1;
2935
2936         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2937                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2938
2939         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2940                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2941                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2942                            pkts[i]->pkt_len);
2943
2944         if (virtio_net_with_host_offload(dev)) {
2945                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2946                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2947                         vhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);
2948                 }
2949         }
2950
2951         if (virtio_net_is_inorder(dev))
2952                 vhost_shadow_dequeue_batch_packed_inorder(vq,
2953                         ids[PACKED_BATCH_SIZE - 1]);
2954         else
2955                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2956
2957         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2958
2959         return 0;
2960 }
2961
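/*
 * Dequeue one descriptor chain from a packed virtqueue: gather the chain
 * into buf_vec, size the mbuf for it and copy the guest buffers into it.
 * The buffer id and the number of descriptors consumed are returned via
 * buf_id/desc_count; allocation and copy errors are logged only once and
 * reported as -1.
 */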
2962 static __rte_always_inline int
2963 vhost_dequeue_single_packed(struct virtio_net *dev,
2964                             struct vhost_virtqueue *vq,
2965                             struct rte_mempool *mbuf_pool,
2966                             struct rte_mbuf *pkts,
2967                             uint16_t *buf_id,
2968                             uint16_t *desc_count,
2969                             bool legacy_ol_flags)
2970 {
2971         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2972         uint32_t buf_len;
2973         uint16_t nr_vec = 0;
2974         int err;
2975         static bool allocerr_warned;
2976
2977         if (unlikely(fill_vec_buf_packed(dev, vq,
2978                                          vq->last_avail_idx, desc_count,
2979                                          buf_vec, &nr_vec,
2980                                          buf_id, &buf_len,
2981                                          VHOST_ACCESS_RO) < 0))
2982                 return -1;
2983
2984         if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
2985                 if (!allocerr_warned) {
2986                         VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
2987                                 dev->ifname, buf_len, mbuf_pool->name);
2988                         allocerr_warned = true;
2989                 }
2990                 return -1;
2991         }
2992
2993         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
2994                                 mbuf_pool, legacy_ol_flags);
2995         if (unlikely(err)) {
2996                 if (!allocerr_warned) {
2997                         VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
2998                                 dev->ifname);
2999                         allocerr_warned = true;
3000                 }
3001                 return -1;
3002         }
3003
3004         return 0;
3005 }
3006
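/*
 * Single-packet wrapper: even when the copy fails, any descriptors that
 * were consumed are still returned through the shadow used ring and
 * last_avail_idx is advanced, so the ring does not stall on a bad chain.
 */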
3007 static __rte_always_inline int
3008 virtio_dev_tx_single_packed(struct virtio_net *dev,
3009                             struct vhost_virtqueue *vq,
3010                             struct rte_mempool *mbuf_pool,
3011                             struct rte_mbuf *pkts,
3012                             bool legacy_ol_flags)
3013 {
3014
3015         uint16_t buf_id, desc_count = 0;
3016         int ret;
3017
3018         ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
3019                                         &desc_count, legacy_ol_flags);
3020
3021         if (likely(desc_count > 0)) {
3022                 if (virtio_net_is_inorder(dev))
3023                         vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
3024                                                                    desc_count);
3025                 else
3026                         vhost_shadow_dequeue_single_packed(vq, buf_id,
3027                                         desc_count);
3028
3029                 vq_inc_last_avail_packed(vq, desc_count);
3030         }
3031
3032         return ret;
3033 }
3034
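/*
 * Dequeue path for packed virtqueues: bulk-allocate the mbufs up front,
 * then try the batched path and fall back to single-descriptor dequeue,
 * stopping at the first failure. Unused mbufs are returned to the pool
 * and any pending shadow used entries are flushed before the guest is
 * notified.
 */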
3035 __rte_always_inline
3036 static uint16_t
3037 virtio_dev_tx_packed(struct virtio_net *dev,
3038                      struct vhost_virtqueue *__rte_restrict vq,
3039                      struct rte_mempool *mbuf_pool,
3040                      struct rte_mbuf **__rte_restrict pkts,
3041                      uint32_t count,
3042                      bool legacy_ol_flags)
3043 {
3044         uint32_t pkt_idx = 0;
3045
3046         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
3047                 return 0;
3048
3049         do {
3050                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
3051
3052                 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
3053                         if (!virtio_dev_tx_batch_packed(dev, vq,
3054                                                         &pkts[pkt_idx],
3055                                                         legacy_ol_flags)) {
3056                                 pkt_idx += PACKED_BATCH_SIZE;
3057                                 continue;
3058                         }
3059                 }
3060
3061                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
3062                                                 pkts[pkt_idx],
3063                                                 legacy_ol_flags))
3064                         break;
3065                 pkt_idx++;
3066         } while (pkt_idx < count);
3067
3068         if (pkt_idx != count)
3069                 rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
3070
3071         if (vq->shadow_used_idx) {
3072                 do_data_copy_dequeue(vq);
3073
3074                 vhost_flush_dequeue_shadow_packed(dev, vq);
3075                 vhost_vring_call_packed(dev, vq);
3076         }
3077
3078         return pkt_idx;
3079 }
3080
3081 __rte_noinline
3082 static uint16_t
3083 virtio_dev_tx_packed_legacy(struct virtio_net *dev,
3084         struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3085         struct rte_mbuf **__rte_restrict pkts, uint32_t count)
3086 {
3087         return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
3088 }
3089
3090 __rte_noinline
3091 static uint16_t
3092 virtio_dev_tx_packed_compliant(struct virtio_net *dev,
3093         struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3094         struct rte_mbuf **__rte_restrict pkts, uint32_t count)
3095 {
3096         return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
3097 }
3098
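/*
 * Public dequeue entry point: validates the device and queue index, takes
 * the virtqueue access lock with a trylock (returning an empty burst if it
 * is contended), translates the vring on first use, injects a RARP
 * announcement at the head of the burst when requested, and dispatches to
 * the packed/split and legacy/compliant variants before updating the
 * per-queue statistics.
 */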
3099 uint16_t
3100 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
3101         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
3102 {
3103         struct virtio_net *dev;
3104         struct rte_mbuf *rarp_mbuf = NULL;
3105         struct vhost_virtqueue *vq;
3106         int16_t success = 1;
3107
3108         dev = get_device(vid);
3109         if (!dev)
3110                 return 0;
3111
3112         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
3113                 VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
3114                                 dev->ifname, __func__);
3115                 return 0;
3116         }
3117
3118         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
3119                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
3120                                 dev->ifname, __func__, queue_id);
3121                 return 0;
3122         }
3123
3124         vq = dev->virtqueue[queue_id];
3125
3126         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
3127                 return 0;
3128
3129         if (unlikely(!vq->enabled)) {
3130                 count = 0;
3131                 goto out_access_unlock;
3132         }
3133
3134         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3135                 vhost_user_iotlb_rd_lock(vq);
3136
3137         if (unlikely(!vq->access_ok))
3138                 if (unlikely(vring_translate(dev, vq) < 0)) {
3139                         count = 0;
3140                         goto out;
3141                 }
3142
3143         /*
3144          * Construct a RARP broadcast packet, and inject it into the "pkts"
3145          * array, so it looks like the guest actually sent such a packet.
3146          *
3147          * Check vhost_user_send_rarp() for more information.
3148          *
3149          * broadcast_rarp shares a cacheline in the virtio_net structure
3150          * with some fields that are accessed during enqueue, and
3151          * __atomic_compare_exchange_n performs a write when it executes the
3152          * compare and exchange. This could result in false sharing between
3153          * enqueue and dequeue.
3154          *
3155          * Prevent unnecessary false sharing by reading broadcast_rarp first
3156          * and only performing compare and exchange if the read indicates it
3157          * is likely to be set.
3158          */
3159         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
3160                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
3161                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
3162
3163                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
3164                 if (rarp_mbuf == NULL) {
3165                         VHOST_LOG_DATA(ERR, "(%s) failed to make RARP packet.\n", dev->ifname);
3166                         count = 0;
3167                         goto out;
3168                 }
3169                 /*
3170                  * Inject it at the head of the "pkts" array, so that the
3171                  * switch's MAC learning table gets updated first.
3172                  */
3173                 pkts[0] = rarp_mbuf;
3174                 vhost_queue_stats_update(dev, vq, pkts, 1);
3175                 pkts++;
3176                 count -= 1;
3177         }
3178
3179         if (vq_is_packed(dev)) {
3180                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3181                         count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
3182                 else
3183                         count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
3184         } else {
3185                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3186                         count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
3187                 else
3188                         count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
3189         }
3190
3191         vhost_queue_stats_update(dev, vq, pkts, count);
3192
3193 out:
3194         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3195                 vhost_user_iotlb_rd_unlock(vq);
3196
3197 out_access_unlock:
3198         rte_spinlock_unlock(&vq->access_lock);
3199
3200         if (unlikely(rarp_mbuf != NULL))
3201                 count += 1;
3202
3203         return count;
3204 }
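/*
 * Usage note (illustrative only, not part of this file's upstream code):
 * a typical application polls rte_vhost_dequeue_burst() per queue and
 * forwards the mbufs, for example to an Ethernet port. The sketch below
 * assumes the vhost device "vid" is already registered and running and
 * that "port_id" and "mbuf_pool" are owned by the application; queue 1 is
 * the guest TX ring of the first virtqueue pair (dequeue requires an odd
 * queue index).
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, nb_tx;
 *
 *	nb_rx = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts,
 *					MAX_PKT_BURST);
 *	if (nb_rx > 0) {
 *		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *		if (nb_tx < nb_rx)
 *			rte_pktmbuf_free_bulk(&pkts[nb_tx], nb_rx - nb_tx);
 *	}
 */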