dpdk.git: lib/vhost/virtio_net.c (commit 886e076b286bf40e18c8a3aca0c684cdf0d00e64)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_net.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_dmadev.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_vhost_async.h>

#include "iotlb.h"
#include "vhost.h"

#define MAX_BATCH_LEN 256

/* DMA device copy operation tracking array. */
struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];

static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
        return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}

static __rte_always_inline bool
virtio_net_is_inorder(struct virtio_net *dev)
{
        return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
}

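/*
 * A virtqueue index is valid if it is below the number of vrings and its
 * parity matches the direction: even indices are guest RX virtqueues (used
 * by the host enqueue path), odd indices are guest TX virtqueues.
 */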
static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
        return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}

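/*
 * Enqueue all scatter-gather segments of one packet copy on the given DMA
 * virtual channel. Returns the number of segments enqueued, or -1 if the
 * channel lacks capacity or a copy submission fails.
 */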
static __rte_always_inline int64_t
vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
                int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
                struct vhost_iov_iter *pkt)
{
        struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
        uint16_t ring_mask = dma_info->ring_mask;
        static bool vhost_async_dma_copy_log;

        struct vhost_iovec *iov = pkt->iov;
        int copy_idx = 0;
        uint32_t nr_segs = pkt->nr_segs;
        uint16_t i;

        if (rte_dma_burst_capacity(dma_id, vchan_id) < nr_segs)
                return -1;

        for (i = 0; i < nr_segs; i++) {
                copy_idx = rte_dma_copy(dma_id, vchan_id, (rte_iova_t)iov[i].src_addr,
                                (rte_iova_t)iov[i].dst_addr, iov[i].len, RTE_DMA_OP_FLAG_LLC);
                /**
                 * Since all memory is pinned and the DMA vChannel
                 * ring has enough space, failure should be a rare
                 * case. If it happens, the DMA device has
                 * encountered a serious error; in that case, stop
                 * the async data path and check what has happened
                 * to the DMA device.
                 */
                if (unlikely(copy_idx < 0)) {
                        if (!vhost_async_dma_copy_log) {
                                VHOST_LOG_DATA(ERR, "(%s) DMA copy failed for channel %d:%u\n",
                                                dev->ifname, dma_id, vchan_id);
                                vhost_async_dma_copy_log = true;
                        }
                        return -1;
                }
        }

        /**
         * Only store the packet completion flag address in the last copy's
         * slot; the other slots are left NULL.
         */
        dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = &vq->async->pkts_cmpl_flag[flag_idx];

        return nr_segs;
}

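/*
 * Enqueue DMA copies for a burst of packets on the given DMA virtual channel
 * and submit them. Returns the number of packets whose copies were fully
 * enqueued; head_idx indexes the per-packet completion flags.
 */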
static __rte_always_inline uint16_t
vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
                int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
                struct vhost_iov_iter *pkts, uint16_t nr_pkts)
{
        struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
        int64_t ret, nr_copies = 0;
        uint16_t pkt_idx;

        rte_spinlock_lock(&dma_info->dma_lock);

        for (pkt_idx = 0; pkt_idx < nr_pkts; pkt_idx++) {
                ret = vhost_async_dma_transfer_one(dev, vq, dma_id, vchan_id, head_idx,
                                &pkts[pkt_idx]);
                if (unlikely(ret < 0))
                        break;

                nr_copies += ret;
                head_idx++;
                if (head_idx >= vq->size)
                        head_idx -= vq->size;
        }

        if (likely(nr_copies > 0))
                rte_dma_submit(dma_id, vchan_id);

        rte_spinlock_unlock(&dma_info->dma_lock);

        return pkt_idx;
}

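/*
 * Poll completed copies on the given DMA virtual channel and set the
 * completion flag of every packet whose last copy has finished. Returns the
 * number of completed copy operations.
 */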
static __rte_always_inline uint16_t
vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t vchan_id,
                uint16_t max_pkts)
{
        struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
        uint16_t ring_mask = dma_info->ring_mask;
        uint16_t last_idx = 0;
        uint16_t nr_copies;
        uint16_t copy_idx;
        uint16_t i;
        bool has_error = false;
        static bool vhost_async_dma_complete_log;

        rte_spinlock_lock(&dma_info->dma_lock);

        /**
         * Print an error log for debugging if the DMA device reports an
         * error during the transfer. Errors are not handled at the vhost
         * level.
         */
        nr_copies = rte_dma_completed(dma_id, vchan_id, max_pkts, &last_idx, &has_error);
        if (unlikely(!vhost_async_dma_complete_log && has_error)) {
                VHOST_LOG_DATA(ERR, "(%s) DMA completion failure on channel %d:%u\n", dev->ifname,
                                dma_id, vchan_id);
                vhost_async_dma_complete_log = true;
        } else if (nr_copies == 0) {
                goto out;
        }

        copy_idx = last_idx - nr_copies + 1;
        for (i = 0; i < nr_copies; i++) {
                bool *flag;

                flag = dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask];
                if (flag) {
                        /**
                         * Mark the packet flag as received. The flag
                         * could belong to another virtqueue, but the
                         * write is atomic.
                         */
                        *flag = true;
                        dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = NULL;
                }
                copy_idx++;
        }

out:
        rte_spinlock_unlock(&dma_info->dma_lock);
        return nr_copies;
}

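/*
 * Flush the batched small copies accumulated on the enqueue path, logging
 * each written guest area for live migration.
 */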
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        struct batch_copy_elem *elem = vq->batch_copy_elems;
        uint16_t count = vq->batch_copy_nb_elems;
        int i;

        for (i = 0; i < count; i++) {
                rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
                vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
                                           elem[i].len);
                PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
        }

        vq->batch_copy_nb_elems = 0;
}

static inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
        struct batch_copy_elem *elem = vq->batch_copy_elems;
        uint16_t count = vq->batch_copy_nb_elems;
        int i;

        for (i = 0; i < count; i++)
                rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

        vq->batch_copy_nb_elems = 0;
}

static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        uint16_t to, uint16_t from, uint16_t size)
{
        rte_memcpy(&vq->used->ring[to],
                        &vq->shadow_used_split[from],
                        size * sizeof(struct vring_used_elem));
        vhost_log_cache_used_vring(dev, vq,
                        offsetof(struct vring_used, ring[to]),
                        size * sizeof(struct vring_used_elem));
}

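/*
 * Flush the shadow used entries to the split used ring, handling the
 * wrap-around in two copies if needed, then publish used->idx with release
 * semantics.
 */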
static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

        if (used_idx + vq->shadow_used_idx <= vq->size) {
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
                                          vq->shadow_used_idx);
        } else {
                uint16_t size;

                /* update used ring interval [used_idx, vq->size] */
                size = vq->size - used_idx;
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

                /* update the remaining used ring interval [0, left_size] */
                do_flush_shadow_used_ring_split(dev, vq, 0, size,
                                          vq->shadow_used_idx - size);
        }
        vq->last_used_idx += vq->shadow_used_idx;

        vhost_log_cache_sync(dev, vq);

        __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
                           __ATOMIC_RELEASE);
        vq->shadow_used_idx = 0;
        vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
                sizeof(vq->used->idx));
}

static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
                         uint16_t desc_idx, uint32_t len)
{
        uint16_t i = vq->shadow_used_idx++;

        vq->shadow_used_split[i].id  = desc_idx;
        vq->shadow_used_split[i].len = len;
}

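/*
 * Flush the enqueue shadow used entries to the packed descriptor ring.
 * Descriptor ids/lens are written first; flags are written after a release
 * fence, with the head descriptor's flags stored last so the guest only
 * sees fully updated descriptors.
 */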
static __rte_always_inline void
vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq)
{
        int i;
        uint16_t used_idx = vq->last_used_idx;
        uint16_t head_idx = vq->last_used_idx;
        uint16_t head_flags = 0;

        /* Split loop in two to save memory barriers */
        for (i = 0; i < vq->shadow_used_idx; i++) {
                vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
                vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

                used_idx += vq->shadow_used_packed[i].count;
                if (used_idx >= vq->size)
                        used_idx -= vq->size;
        }

        /* The ordering for storing desc flags needs to be enforced. */
        rte_atomic_thread_fence(__ATOMIC_RELEASE);

        for (i = 0; i < vq->shadow_used_idx; i++) {
                uint16_t flags;

                if (vq->shadow_used_packed[i].len)
                        flags = VRING_DESC_F_WRITE;
                else
                        flags = 0;

                if (vq->used_wrap_counter) {
                        flags |= VRING_DESC_F_USED;
                        flags |= VRING_DESC_F_AVAIL;
                } else {
                        flags &= ~VRING_DESC_F_USED;
                        flags &= ~VRING_DESC_F_AVAIL;
                }

                if (i > 0) {
                        vq->desc_packed[vq->last_used_idx].flags = flags;

                        vhost_log_cache_used_vring(dev, vq,
                                        vq->last_used_idx *
                                        sizeof(struct vring_packed_desc),
                                        sizeof(struct vring_packed_desc));
                } else {
                        head_idx = vq->last_used_idx;
                        head_flags = flags;
                }

                vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
        }

        vq->desc_packed[head_idx].flags = head_flags;

        vhost_log_cache_used_vring(dev, vq,
                                head_idx *
                                sizeof(struct vring_packed_desc),
                                sizeof(struct vring_packed_desc));

        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq)
{
        struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];

        vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
        /* The desc flags are the synchronization point for the virtio packed vring. */
        __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
                         used_elem->flags, __ATOMIC_RELEASE);

        vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc));
        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
                                 struct vhost_virtqueue *vq,
                                 uint64_t *lens,
                                 uint16_t *ids)
{
        uint16_t i;
        uint16_t flags;
        uint16_t last_used_idx;
        struct vring_packed_desc *desc_base;

        last_used_idx = vq->last_used_idx;
        desc_base = &vq->desc_packed[last_used_idx];

        flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                desc_base[i].id = ids[i];
                desc_base[i].len = lens[i];
        }

        rte_atomic_thread_fence(__ATOMIC_RELEASE);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                desc_base[i].flags = flags;
        }

        vhost_log_cache_used_vring(dev, vq, last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc) *
                                   PACKED_BATCH_SIZE);
        vhost_log_cache_sync(dev, vq);

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
                                          uint16_t id)
{
        vq->shadow_used_packed[0].id = id;

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].flags =
                        PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].count = 1;
                vq->shadow_used_idx++;
        }

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq,
                                  uint16_t *ids)
{
        uint16_t flags;
        uint16_t i;
        uint16_t begin;

        flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].id  = ids[0];
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].count = 1;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
                begin = 1;
        } else
                begin = 0;

        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
                vq->desc_packed[vq->last_used_idx + i].id = ids[i];
                vq->desc_packed[vq->last_used_idx + i].len = 0;
        }

        rte_atomic_thread_fence(__ATOMIC_RELEASE);
        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;

        vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc) *
                                   PACKED_BATCH_SIZE);
        vhost_log_cache_sync(dev, vq);

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
                                   uint16_t buf_id,
                                   uint16_t count)
{
        uint16_t flags;

        flags = vq->desc_packed[vq->last_used_idx].flags;
        if (vq->used_wrap_counter) {
                flags |= VRING_DESC_F_USED;
                flags |= VRING_DESC_F_AVAIL;
        } else {
                flags &= ~VRING_DESC_F_USED;
                flags &= ~VRING_DESC_F_AVAIL;
        }

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;

                vq->shadow_used_packed[0].id  = buf_id;
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
        } else {
                vq->desc_packed[vq->last_used_idx].id = buf_id;
                vq->desc_packed[vq->last_used_idx].len = 0;
                vq->desc_packed[vq->last_used_idx].flags = flags;
        }

        vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
                                           uint16_t buf_id,
                                           uint16_t count)
{
        uint16_t flags;

        vq->shadow_used_packed[0].id = buf_id;

        flags = vq->desc_packed[vq->last_used_idx].flags;
        if (vq->used_wrap_counter) {
                flags |= VRING_DESC_F_USED;
                flags |= VRING_DESC_F_AVAIL;
        } else {
                flags &= ~VRING_DESC_F_USED;
                flags &= ~VRING_DESC_F_AVAIL;
        }

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
        }

        vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
                                   uint32_t *len,
                                   uint16_t *id,
                                   uint16_t *count,
                                   uint16_t num_buffers)
{
        uint16_t i;

        for (i = 0; i < num_buffers; i++) {
                /* enqueue shadow flush action aligned with batch num */
                if (!vq->shadow_used_idx)
                        vq->shadow_aligned_idx = vq->last_used_idx &
                                PACKED_BATCH_MASK;
                vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
                vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
                vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
                vq->shadow_aligned_idx += count[i];
                vq->shadow_used_idx++;
        }
}

static __rte_always_inline void
vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
                                   struct vhost_virtqueue *vq,
                                   uint32_t *len,
                                   uint16_t *id,
                                   uint16_t *count,
                                   uint16_t num_buffers)
{
        vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);

        if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }
}

/* avoid the write operation when it is not needed, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {      \
        if ((var) != (val))                     \
                (var) = (val);                  \
} while (0)

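/*
 * Translate the mbuf Tx offload flags (L4 checksum, TSO/UFO) into the
 * virtio_net_hdr fields consumed by the guest, and compute the IPv4 header
 * checksum when requested.
 */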
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
        uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;

        if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
                csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;

        if (csum_l4) {
                net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

                switch (csum_l4) {
                case RTE_MBUF_F_TX_TCP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
                                                cksum));
                        break;
                case RTE_MBUF_F_TX_UDP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
                                                dgram_cksum));
                        break;
                case RTE_MBUF_F_TX_SCTP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
                                                cksum));
                        break;
                }
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
        }

        /* The virtio-net header has no IP cksum offload flag, so compute it here. */
        if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
                struct rte_ipv4_hdr *ipv4_hdr;

                ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
                                                   m_buf->l2_len);
                ipv4_hdr->hdr_checksum = 0;
                ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
        }

        if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
                        net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else
                        net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
                                        + m_buf->l4_len;
        } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
                net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
                        m_buf->l4_len;
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
        }
}

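/*
 * Translate one descriptor's guest IOVA range into host virtual addresses
 * and append the resulting chunk(s) to buf_vec; a range that is not
 * contiguous in host VA space may produce several buf_vec entries.
 */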
static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct buf_vector *buf_vec, uint16_t *vec_idx,
                uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
        uint16_t vec_id = *vec_idx;

        while (desc_len) {
                uint64_t desc_addr;
                uint64_t desc_chunck_len = desc_len;

                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;

                desc_addr = vhost_iova_to_vva(dev, vq,
                                desc_iova,
                                &desc_chunck_len,
                                perm);
                if (unlikely(!desc_addr))
                        return -1;

                rte_prefetch0((void *)(uintptr_t)desc_addr);

                buf_vec[vec_id].buf_iova = desc_iova;
                buf_vec[vec_id].buf_addr = desc_addr;
                buf_vec[vec_id].buf_len  = desc_chunck_len;

                desc_len -= desc_chunck_len;
                desc_iova += desc_chunck_len;
                vec_id++;
        }
        *vec_idx = vec_id;

        return 0;
}

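/*
 * Walk the split-ring descriptor chain starting at the given avail index
 * (following indirect tables if present) and collect every buffer into
 * buf_vec. Returns the chain head index and total chain length to the
 * caller.
 */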
static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                         uint32_t avail_idx, uint16_t *vec_idx,
                         struct buf_vector *buf_vec, uint16_t *desc_chain_head,
                         uint32_t *desc_chain_len, uint8_t perm)
{
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint16_t vec_id = *vec_idx;
        uint32_t len    = 0;
        uint64_t dlen;
        uint32_t nr_descs = vq->size;
        uint32_t cnt    = 0;
        struct vring_desc *descs = vq->desc;
        struct vring_desc *idesc = NULL;

        if (unlikely(idx >= vq->size))
                return -1;

        *desc_chain_head = idx;

        if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
                dlen = vq->desc[idx].len;
                nr_descs = dlen / sizeof(struct vring_desc);
                if (unlikely(nr_descs > vq->size))
                        return -1;

                descs = (struct vring_desc *)(uintptr_t)
                        vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
                                                &dlen,
                                                VHOST_ACCESS_RO);
                if (unlikely(!descs))
                        return -1;

                if (unlikely(dlen < vq->desc[idx].len)) {
                        /*
                         * The indirect desc table is not contiguous
                         * in the process VA space, so we have to copy it.
                         */
                        idesc = vhost_alloc_copy_ind_table(dev, vq,
                                        vq->desc[idx].addr, vq->desc[idx].len);
                        if (unlikely(!idesc))
                                return -1;

                        descs = idesc;
                }

                idx = 0;
        }

        while (1) {
                if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
                        free_ind_table(idesc);
                        return -1;
                }

                dlen = descs[idx].len;
                len += dlen;

                if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                descs[idx].addr, dlen,
                                                perm))) {
                        free_ind_table(idesc);
                        return -1;
                }

                if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;

                idx = descs[idx].next;
        }

        *desc_chain_len = len;
        *vec_idx = vec_id;

        if (unlikely(!!idesc))
                free_ind_table(idesc);

        return 0;
}

/*
 * Returns -1 on failure, 0 on success.
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint32_t size, struct buf_vector *buf_vec,
                                uint16_t *num_buffers, uint16_t avail_head,
                                uint16_t *nr_vec)
{
        uint16_t cur_idx;
        uint16_t vec_idx = 0;
        uint16_t max_tries, tries = 0;

        uint16_t head_idx = 0;
        uint32_t len = 0;

        *num_buffers = 0;
        cur_idx  = vq->last_avail_idx;

        if (rxvq_is_mergeable(dev))
                max_tries = vq->size - 1;
        else
                max_tries = 1;

        while (size > 0) {
                if (unlikely(cur_idx == avail_head))
                        return -1;
                /*
                 * If we have tried all available ring items and still
                 * can't get enough buffers, something abnormal has
                 * happened.
                 */
                if (unlikely(++tries > max_tries))
                        return -1;

                if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
                                                &vec_idx, buf_vec,
                                                &head_idx, &len,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;
                len = RTE_MIN(len, size);
                update_shadow_used_ring_split(vq, head_idx, len);
                size -= len;

                cur_idx++;
                *num_buffers += 1;
        }

        *nr_vec = vec_idx;

        return 0;
}

static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        struct vring_packed_desc *desc, uint16_t *vec_idx,
                        struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
{
        uint16_t i;
        uint32_t nr_descs;
        uint16_t vec_id = *vec_idx;
        uint64_t dlen;
        struct vring_packed_desc *descs, *idescs = NULL;

        dlen = desc->len;
        descs = (struct vring_packed_desc *)(uintptr_t)
                vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
        if (unlikely(!descs))
                return -1;

        if (unlikely(dlen < desc->len)) {
                /*
                 * The indirect desc table is not contiguous
                 * in the process VA space, so we have to copy it.
                 */
                idescs = vhost_alloc_copy_ind_table(dev,
                                vq, desc->addr, desc->len);
                if (unlikely(!idescs))
                        return -1;

                descs = idescs;
        }

        nr_descs = desc->len / sizeof(struct vring_packed_desc);
        if (unlikely(nr_descs >= vq->size)) {
                free_ind_table(idescs);
                return -1;
        }

        for (i = 0; i < nr_descs; i++) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
                        free_ind_table(idescs);
                        return -1;
                }

                dlen = descs[i].len;
                *len += dlen;
                if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                descs[i].addr, dlen,
                                                perm)))
                        return -1;
        }
        *vec_idx = vec_id;

        if (unlikely(!!idescs))
                free_ind_table(idescs);

        return 0;
}

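/*
 * Collect the buffers of a packed-ring descriptor chain starting at
 * avail_idx into buf_vec, following indirect tables and handling ring
 * wrap-around. Also reports the buffer id and the number of descriptors
 * consumed.
 */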
static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint16_t avail_idx, uint16_t *desc_count,
                                struct buf_vector *buf_vec, uint16_t *vec_idx,
                                uint16_t *buf_id, uint32_t *len, uint8_t perm)
{
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t vec_id = *vec_idx;
        uint64_t dlen;

        if (avail_idx < vq->last_avail_idx)
                wrap_counter ^= 1;

        /*
         * Perform a load-acquire barrier in desc_is_avail to
         * enforce the ordering between desc flags and desc
         * content.
         */
        if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
                return -1;

        *desc_count = 0;
        *len = 0;

        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;

                if (unlikely(*desc_count >= vq->size))
                        return -1;

                *desc_count += 1;
                *buf_id = descs[avail_idx].id;

                if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
                        if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
                                                        &descs[avail_idx],
                                                        &vec_id, buf_vec,
                                                        len, perm) < 0))
                                return -1;
                } else {
                        dlen = descs[avail_idx].len;
                        *len += dlen;

                        if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                        descs[avail_idx].addr,
                                                        dlen,
                                                        perm)))
                                return -1;
                }

                if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;

                if (++avail_idx >= vq->size) {
                        avail_idx -= vq->size;
                        wrap_counter ^= 1;
                }
        }

        *vec_idx = vec_id;

        return 0;
}

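/*
 * Copy the virtio-net header into a guest buffer that is split across
 * several buf_vec entries, logging each written chunk.
 */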
static __rte_noinline void
copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct buf_vector *buf_vec,
                struct virtio_net_hdr_mrg_rxbuf *hdr)
{
        uint64_t len;
        uint64_t remain = dev->vhost_hlen;
        uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
        uint64_t iova = buf_vec->buf_iova;

        while (remain) {
                len = RTE_MIN(remain,
                                buf_vec->buf_len);
                dst = buf_vec->buf_addr;
                rte_memcpy((void *)(uintptr_t)dst,
                                (void *)(uintptr_t)src,
                                len);

                PRINT_PACKET(dev, (uintptr_t)dst,
                                (uint32_t)len, 0);
                vhost_log_cache_write_iova(dev, vq,
                                iova, len);

                remain -= len;
                iova += len;
                src += len;
                buf_vec++;
        }
}

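/*
 * The async_iter_* helpers build one vhost_iov_iter (a scatter-gather list
 * of copy segments) per packet for the async DMA path: initialize starts a
 * new iterator, add_iovec appends a segment, finalize commits the iterator
 * and cancel rolls it back.
 */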
static __rte_always_inline int
async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
{
        struct vhost_iov_iter *iter;

        if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
                VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
                return -1;
        }

        iter = async->iov_iter + async->iter_idx;
        iter->iov = async->iovec + async->iovec_idx;
        iter->nr_segs = 0;

        return 0;
}

static __rte_always_inline int
async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
                void *src, void *dst, size_t len)
{
        struct vhost_iov_iter *iter;
        struct vhost_iovec *iovec;

        if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
                static bool vhost_max_async_vec_log;

                if (!vhost_max_async_vec_log) {
                        VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
                        vhost_max_async_vec_log = true;
                }

                return -1;
        }

        iter = async->iov_iter + async->iter_idx;
        iovec = async->iovec + async->iovec_idx;

        iovec->src_addr = src;
        iovec->dst_addr = dst;
        iovec->len = len;

        iter->nr_segs++;
        async->iovec_idx++;

        return 0;
}

static __rte_always_inline void
async_iter_finalize(struct vhost_async *async)
{
        async->iter_idx++;
}

static __rte_always_inline void
async_iter_cancel(struct vhost_async *async)
{
        struct vhost_iov_iter *iter;

        iter = async->iov_iter + async->iter_idx;
        async->iovec_idx -= iter->nr_segs;
        iter->nr_segs = 0;
        iter->iov = NULL;
}

static __rte_always_inline void
async_iter_reset(struct vhost_async *async)
{
        async->iter_idx = 0;
        async->iovec_idx = 0;
}

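/*
 * Build the DMA iovecs for one copy segment: the guest buffer IOVA is
 * translated into host-physical chunks, and each chunk is paired with the
 * corresponding mbuf data address as a source/destination pair.
 */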
static __rte_always_inline int
async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct rte_mbuf *m, uint32_t mbuf_offset,
                uint64_t buf_iova, uint32_t cpy_len)
{
        struct vhost_async *async = vq->async;
        uint64_t mapped_len;
        uint32_t buf_offset = 0;
        void *hpa;

        while (cpy_len) {
                hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
                                buf_iova + buf_offset, cpy_len, &mapped_len);
                if (unlikely(!hpa)) {
                        VHOST_LOG_DATA(ERR, "(%s) %s: failed to get hpa.\n", dev->ifname, __func__);
                        return -1;
                }

                if (unlikely(async_iter_add_iovec(dev, async,
                                                (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
                                                        mbuf_offset),
                                                hpa, (size_t)mapped_len)))
                        return -1;

                cpy_len -= (uint32_t)mapped_len;
                mbuf_offset += (uint32_t)mapped_len;
                buf_offset += (uint32_t)mapped_len;
        }

        return 0;
}

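/*
 * Copy one segment from the mbuf into the guest buffer: large copies (or
 * copies made while the batch list is full) are done immediately, small
 * ones are queued on the batch copy list and flushed later by
 * do_data_copy_enqueue().
 */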
static __rte_always_inline void
sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct rte_mbuf *m, uint32_t mbuf_offset,
                uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len)
{
        struct batch_copy_elem *batch_copy = vq->batch_copy_elems;

        if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) {
                rte_memcpy((void *)((uintptr_t)(buf_addr)),
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
                                cpy_len);
                vhost_log_cache_write_iova(dev, vq, buf_iova, cpy_len);
                PRINT_PACKET(dev, (uintptr_t)(buf_addr), cpy_len, 0);
        } else {
                batch_copy[vq->batch_copy_nb_elems].dst =
                        (void *)((uintptr_t)(buf_addr));
                batch_copy[vq->batch_copy_nb_elems].src =
                        rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
                batch_copy[vq->batch_copy_nb_elems].log_addr = buf_iova;
                batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
                vq->batch_copy_nb_elems++;
        }
}

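/*
 * Copy one mbuf chain into the buffers described by buf_vec: fill in the
 * virtio-net header (possibly spanning several buffers), then copy the
 * payload either synchronously or by building async DMA iovecs.
 */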
static __rte_always_inline int
mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct rte_mbuf *m, struct buf_vector *buf_vec,
                uint16_t nr_vec, uint16_t num_buffers, bool is_async)
{
        uint32_t vec_idx = 0;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t buf_offset, buf_avail;
        uint64_t buf_addr, buf_iova, buf_len;
        uint32_t cpy_len;
        uint64_t hdr_addr;
        struct rte_mbuf *hdr_mbuf;
        struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
        struct vhost_async *async = vq->async;

        if (unlikely(m == NULL))
                return -1;

        buf_addr = buf_vec[vec_idx].buf_addr;
        buf_iova = buf_vec[vec_idx].buf_iova;
        buf_len = buf_vec[vec_idx].buf_len;

        if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
                return -1;

        hdr_mbuf = m;
        hdr_addr = buf_addr;
        if (unlikely(buf_len < dev->vhost_hlen)) {
                memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
                hdr = &tmp_hdr;
        } else
                hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

        VHOST_LOG_DATA(DEBUG, "(%s) RX: num merge buffers %d\n",
                dev->ifname, num_buffers);

        if (unlikely(buf_len < dev->vhost_hlen)) {
                buf_offset = dev->vhost_hlen - buf_len;
                vec_idx++;
                buf_addr = buf_vec[vec_idx].buf_addr;
                buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
                buf_avail = buf_len - buf_offset;
        } else {
                buf_offset = dev->vhost_hlen;
                buf_avail = buf_len - dev->vhost_hlen;
        }

        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;

        if (is_async) {
                if (async_iter_initialize(dev, async))
                        return -1;
        }

        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current buf, get the next one */
                if (buf_avail == 0) {
                        vec_idx++;
                        if (unlikely(vec_idx >= nr_vec))
                                goto error;

                        buf_addr = buf_vec[vec_idx].buf_addr;
                        buf_iova = buf_vec[vec_idx].buf_iova;
                        buf_len = buf_vec[vec_idx].buf_len;

                        buf_offset = 0;
                        buf_avail  = buf_len;
                }

                /* done with current mbuf, get the next one */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }

                if (hdr_addr) {
                        virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
                        if (rxvq_is_mergeable(dev))
                                ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
                                                num_buffers);

                        if (unlikely(hdr == &tmp_hdr)) {
                                copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
                        } else {
                                PRINT_PACKET(dev, (uintptr_t)hdr_addr,
                                                dev->vhost_hlen, 0);
                                vhost_log_cache_write_iova(dev, vq,
                                                buf_vec[0].buf_iova,
                                                dev->vhost_hlen);
                        }

                        hdr_addr = 0;
                }

                cpy_len = RTE_MIN(buf_avail, mbuf_avail);

                if (is_async) {
                        if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
                                                buf_iova + buf_offset, cpy_len) < 0)
                                goto error;
                } else {
                        sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
                                        buf_addr + buf_offset,
                                        buf_iova + buf_offset, cpy_len);
                }

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                buf_avail  -= cpy_len;
                buf_offset += cpy_len;
        }

        if (is_async)
                async_iter_finalize(async);

        return 0;
error:
        if (is_async)
                async_iter_cancel(async);

        return -1;
}

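/*
 * Reserve enough packed-ring descriptors for one packet, copy the packet
 * into them, and record the used entries in the shadow ring.
 */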
static __rte_always_inline int
vhost_enqueue_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mbuf *pkt,
                            struct buf_vector *buf_vec,
                            uint16_t *nr_descs)
{
        uint16_t nr_vec = 0;
        uint16_t avail_idx = vq->last_avail_idx;
        uint16_t max_tries, tries = 0;
        uint16_t buf_id = 0;
        uint32_t len = 0;
        uint16_t desc_count;
        uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint16_t num_buffers = 0;
        uint32_t buffer_len[vq->size];
        uint16_t buffer_buf_id[vq->size];
        uint16_t buffer_desc_count[vq->size];

        if (rxvq_is_mergeable(dev))
                max_tries = vq->size - 1;
        else
                max_tries = 1;

        while (size > 0) {
                /*
                 * If we have tried all available ring items and still
                 * can't get enough buffers, something abnormal has
                 * happened.
                 */
                if (unlikely(++tries > max_tries))
                        return -1;

                if (unlikely(fill_vec_buf_packed(dev, vq,
                                                avail_idx, &desc_count,
                                                buf_vec, &nr_vec,
                                                &buf_id, &len,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;

                len = RTE_MIN(len, size);
                size -= len;

                buffer_len[num_buffers] = len;
                buffer_buf_id[num_buffers] = buf_id;
                buffer_desc_count[num_buffers] = desc_count;
                num_buffers += 1;

                *nr_descs += desc_count;
                avail_idx += desc_count;
                if (avail_idx >= vq->size)
                        avail_idx -= vq->size;
        }

        if (mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers, false) < 0)
                return -1;

        vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
                                           buffer_desc_count, num_buffers);

        return 0;
}

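/*
 * Enqueue a burst of packets into a split virtqueue: reserve descriptors,
 * copy each packet, then flush the shadow used ring and notify the guest.
 */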
static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mbuf **pkts, uint32_t count)
{
        uint32_t pkt_idx = 0;
        uint16_t num_buffers;
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t avail_head;

        /*
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
        avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);

        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
                uint16_t nr_vec = 0;

                if (unlikely(reserve_avail_buf_split(dev, vq,
                                                pkt_len, buf_vec, &num_buffers,
                                                avail_head, &nr_vec) < 0)) {
                        VHOST_LOG_DATA(DEBUG,
                                "(%s) failed to get enough desc from vring\n",
                                dev->ifname);
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }

                VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
                        dev->ifname, vq->last_avail_idx,
                        vq->last_avail_idx + num_buffers);

                if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
                                        num_buffers, false) < 0) {
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }

                vq->last_avail_idx += num_buffers;
        }

        do_data_copy_enqueue(dev, vq);

        if (likely(vq->shadow_used_idx)) {
                flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call_split(dev, vq);
        }

        return pkt_idx;
}

static __rte_always_inline int
virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts,
                           uint64_t *desc_addrs,
                           uint64_t *lens)
{
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t avail_idx = vq->last_avail_idx;
        uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint16_t i;

        if (unlikely(avail_idx & PACKED_BATCH_MASK))
                return -1;

        if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
                return -1;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(pkts[i]->next != NULL))
                        return -1;
                if (unlikely(!desc_is_avail(&descs[avail_idx + i],
                                            wrap_counter)))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                desc_addrs[i] = vhost_iova_to_vva(dev, vq,
                                                  descs[avail_idx + i].addr,
                                                  &lens[i],
                                                  VHOST_ACCESS_RW);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(!desc_addrs[i]))
                        return -1;
                if (unlikely(lens[i] != descs[avail_idx + i].len))
                        return -1;
        }

        return 0;
}

static __rte_always_inline void
virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts,
                           uint64_t *desc_addrs,
                           uint64_t *lens)
{
        uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t avail_idx = vq->last_avail_idx;
        uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
                hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
                                        (uintptr_t)desc_addrs[i];
                lens[i] = pkts[i]->pkt_len +
                        sizeof(struct virtio_net_hdr_mrg_rxbuf);
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);

        vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
                           rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
                           pkts[i]->pkt_len);
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
                                           lens[i]);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                ids[i] = descs[avail_idx + i].id;

        vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
}

static __rte_always_inline int
virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts)
{
        uint64_t desc_addrs[PACKED_BATCH_SIZE];
        uint64_t lens[PACKED_BATCH_SIZE];

        if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
                return -1;

        if (vq->shadow_used_idx) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }

        virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);

        return 0;
}

static __rte_always_inline int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mbuf *pkt)
{
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t nr_descs = 0;

        if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
                                                 &nr_descs) < 0)) {
                VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
                                dev->ifname);
                return -1;
        }

        VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
                        dev->ifname, vq->last_avail_idx,
                        vq->last_avail_idx + nr_descs);

        vq_inc_last_avail_packed(vq, nr_descs);

        return 0;
}

1438 static __rte_noinline uint32_t
1439 virtio_dev_rx_packed(struct virtio_net *dev,
1440                      struct vhost_virtqueue *__rte_restrict vq,
1441                      struct rte_mbuf **__rte_restrict pkts,
1442                      uint32_t count)
1443 {
1444         uint32_t pkt_idx = 0;
1445
1446         do {
1447                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1448
1449                 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
1450                         if (!virtio_dev_rx_sync_batch_packed(dev, vq,
1451                                                         &pkts[pkt_idx])) {
1452                                 pkt_idx += PACKED_BATCH_SIZE;
1453                                 continue;
1454                         }
1455                 }
1456
1457                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1458                         break;
1459                 pkt_idx++;
1460
1461         } while (pkt_idx < count);
1462
1463         if (vq->shadow_used_idx) {
1464                 do_data_copy_enqueue(dev, vq);
1465                 vhost_flush_enqueue_shadow_packed(dev, vq);
1466         }
1467
1468         if (pkt_idx)
1469                 vhost_vring_call_packed(dev, vq);
1470
1471         return pkt_idx;
1472 }
1473
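/*
 * Enqueue entry point for the synchronous data path: validate the virtqueue
 * index, take the access lock (and the IOTLB read lock when an IOMMU is
 * used), then dispatch to the packed- or split-ring enqueue routine.
 */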
1474 static __rte_always_inline uint32_t
1475 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1476         struct rte_mbuf **pkts, uint32_t count)
1477 {
1478         struct vhost_virtqueue *vq;
1479         uint32_t nb_tx = 0;
1480
1481         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
1482         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1483                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
1484                         dev->ifname, __func__, queue_id);
1485                 return 0;
1486         }
1487
1488         vq = dev->virtqueue[queue_id];
1489
1490         rte_spinlock_lock(&vq->access_lock);
1491
1492         if (unlikely(!vq->enabled))
1493                 goto out_access_unlock;
1494
1495         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1496                 vhost_user_iotlb_rd_lock(vq);
1497
1498         if (unlikely(!vq->access_ok))
1499                 if (unlikely(vring_translate(dev, vq) < 0))
1500                         goto out;
1501
1502         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1503         if (count == 0)
1504                 goto out;
1505
1506         if (vq_is_packed(dev))
1507                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1508         else
1509                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1510
1511 out:
1512         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1513                 vhost_user_iotlb_rd_unlock(vq);
1514
1515 out_access_unlock:
1516         rte_spinlock_unlock(&vq->access_lock);
1517
1518         return nb_tx;
1519 }
1520
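/*
 * Illustrative usage sketch (not part of the library): a caller typically
 * forwards a burst of mbufs obtained elsewhere (e.g. from rte_eth_rx_burst())
 * and frees whatever could not be enqueued. vid, rx_vq_id, pkts and nb_rx
 * are placeholders for this example.
 *
 *	uint16_t nb_enq = rte_vhost_enqueue_burst(vid, rx_vq_id, pkts, nb_rx);
 *
 *	if (nb_enq < nb_rx)
 *		rte_pktmbuf_free_bulk(&pkts[nb_enq], nb_rx - nb_enq);
 */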
1521 uint16_t
1522 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1523         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1524 {
1525         struct virtio_net *dev = get_device(vid);
1526
1527         if (!dev)
1528                 return 0;
1529
1530         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1531                 VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
1532                         dev->ifname, __func__);
1533                 return 0;
1534         }
1535
1536         return virtio_dev_rx(dev, queue_id, pkts, count);
1537 }
1538
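/*
 * Return the ring slot (in [0, vq->size)) of the oldest in-flight async
 * packet. For example, with vq->size == 256, pkts_idx == 10 and
 * pkts_inflight_n == 20, the oldest in-flight packet sits in slot
 * 256 - 20 + 10 == 246.
 */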
1539 static __rte_always_inline uint16_t
1540 async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
1541 {
1542         struct vhost_async *async = vq->async;
1543
1544         if (async->pkts_idx >= async->pkts_inflight_n)
1545                 return async->pkts_idx - async->pkts_inflight_n;
1546         else
1547                 return vq->size - async->pkts_inflight_n + async->pkts_idx;
1548 }
1549
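/*
 * Copy 'count' used elements from s_ring (starting at s_idx) into d_ring
 * (starting at d_idx), splitting the copy in two when the destination wraps
 * around the ring end. The packed variant below does the same for packed
 * used elements.
 */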
1550 static __rte_always_inline void
1551 store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,
1552                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1553 {
1554         size_t elem_size = sizeof(struct vring_used_elem);
1555
1556         if (d_idx + count <= ring_size) {
1557                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1558         } else {
1559                 uint16_t size = ring_size - d_idx;
1560
1561                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1562                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1563         }
1564 }
1565
1566 static __rte_always_inline void
1567 store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
1568                 struct vring_used_elem_packed *d_ring,
1569                 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1570 {
1571         size_t elem_size = sizeof(struct vring_used_elem_packed);
1572
1573         if (d_idx + count <= ring_size) {
1574                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1575         } else {
1576                 uint16_t size = ring_size - d_idx;
1577
1578                 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1579                 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1580         }
1581 }
1582
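/*
 * Async enqueue for split rings: descriptors are reserved and copy jobs are
 * built for each packet, then handed to the DMA vChannel in a single
 * vhost_async_dma_transfer() call. Packets the DMA device did not accept are
 * rolled back from the shadow used ring and the avail index; the rest are
 * accounted as in-flight until their completion flags are polled.
 */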
1583 static __rte_noinline uint32_t
1584 virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1585                 uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
1586                 int16_t dma_id, uint16_t vchan_id)
1587 {
1588         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1589         uint32_t pkt_idx = 0;
1590         uint16_t num_buffers;
1591         uint16_t avail_head;
1592
1593         struct vhost_async *async = vq->async;
1594         struct async_inflight_info *pkts_info = async->pkts_info;
1595         uint32_t pkt_err = 0;
1596         uint16_t n_xfer;
1597         uint16_t slot_idx = 0;
1598
1599         /*
1600          * The ordering between avail index and desc reads needs to be enforced.
1601          */
1602         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1603
1604         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1605
1606         async_iter_reset(async);
1607
1608         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1609                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1610                 uint16_t nr_vec = 0;
1611
1612                 if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
1613                                                 &num_buffers, avail_head, &nr_vec) < 0)) {
1614                         VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
1615                                         dev->ifname);
1616                         vq->shadow_used_idx -= num_buffers;
1617                         break;
1618                 }
1619
1620                 VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
1621                         dev->ifname, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
1622
1623                 if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
1624                         vq->shadow_used_idx -= num_buffers;
1625                         break;
1626                 }
1627
1628                 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
1629                 pkts_info[slot_idx].descs = num_buffers;
1630                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1631
1632                 vq->last_avail_idx += num_buffers;
1633         }
1634
1635         if (unlikely(pkt_idx == 0))
1636                 return 0;
1637
1638         n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
1639                         async->iov_iter, pkt_idx);
1640
1641         pkt_err = pkt_idx - n_xfer;
1642         if (unlikely(pkt_err)) {
1643                 uint16_t num_descs = 0;
1644
1645                 VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
1646                                 dev->ifname, __func__, pkt_err, queue_id);
1647
1648                 /* update number of completed packets */
1649                 pkt_idx = n_xfer;
1650
1651                 /* calculate the sum of descriptors to revert */
1652                 while (pkt_err-- > 0) {
1653                         num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1654                         slot_idx--;
1655                 }
1656
1657                 /* recover shadow used ring and available ring */
1658                 vq->shadow_used_idx -= num_descs;
1659                 vq->last_avail_idx -= num_descs;
1660         }
1661
1662         /* keep used descriptors */
1663         if (likely(vq->shadow_used_idx)) {
1664                 uint16_t to = async->desc_idx_split & (vq->size - 1);
1665
1666                 store_dma_desc_info_split(vq->shadow_used_split,
1667                                 async->descs_split, vq->size, 0, to,
1668                                 vq->shadow_used_idx);
1669
1670                 async->desc_idx_split += vq->shadow_used_idx;
1671
1672                 async->pkts_idx += pkt_idx;
1673                 if (async->pkts_idx >= vq->size)
1674                         async->pkts_idx -= vq->size;
1675
1676                 async->pkts_inflight_n += pkt_idx;
1677                 vq->shadow_used_idx = 0;
1678         }
1679
1680         return pkt_idx;
1681 }
1682
1683
1684 static __rte_always_inline int
1685 vhost_enqueue_async_packed(struct virtio_net *dev,
1686                             struct vhost_virtqueue *vq,
1687                             struct rte_mbuf *pkt,
1688                             struct buf_vector *buf_vec,
1689                             uint16_t *nr_descs,
1690                             uint16_t *nr_buffers)
1691 {
1692         uint16_t nr_vec = 0;
1693         uint16_t avail_idx = vq->last_avail_idx;
1694         uint16_t max_tries, tries = 0;
1695         uint16_t buf_id = 0;
1696         uint32_t len = 0;
1697         uint16_t desc_count = 0;
1698         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1699         uint32_t buffer_len[vq->size];
1700         uint16_t buffer_buf_id[vq->size];
1701         uint16_t buffer_desc_count[vq->size];
1702
1703         if (rxvq_is_mergeable(dev))
1704                 max_tries = vq->size - 1;
1705         else
1706                 max_tries = 1;
1707
1708         while (size > 0) {
1709                 /*
1710                  * If we have tried all available ring items and still
1711                  * cannot get enough buffers, something abnormal has
1712                  * happened.
1713                  */
1714                 if (unlikely(++tries > max_tries))
1715                         return -1;
1716
1717                 if (unlikely(fill_vec_buf_packed(dev, vq,
1718                                                 avail_idx, &desc_count,
1719                                                 buf_vec, &nr_vec,
1720                                                 &buf_id, &len,
1721                                                 VHOST_ACCESS_RW) < 0))
1722                         return -1;
1723
1724                 len = RTE_MIN(len, size);
1725                 size -= len;
1726
1727                 buffer_len[*nr_buffers] = len;
1728                 buffer_buf_id[*nr_buffers] = buf_id;
1729                 buffer_desc_count[*nr_buffers] = desc_count;
1730                 *nr_buffers += 1;
1731                 *nr_descs += desc_count;
1732                 avail_idx += desc_count;
1733                 if (avail_idx >= vq->size)
1734                         avail_idx -= vq->size;
1735         }
1736
1737         if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
1738                 return -1;
1739
1740         vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
1741
1742         return 0;
1743 }
1744
1745 static __rte_always_inline int16_t
1746 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1747                             struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
1748 {
1749         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1750
1751         if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
1752                                         nr_descs, nr_buffers) < 0)) {
1753                 VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n", dev->ifname);
1754                 return -1;
1755         }
1756
1757         VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
1758                         dev->ifname, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
1759
1760         return 0;
1761 }
1762
1763 static __rte_always_inline void
1764 dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
1765                         uint32_t nr_err, uint32_t *pkt_idx)
1766 {
1767         uint16_t descs_err = 0;
1768         uint16_t buffers_err = 0;
1769         struct async_inflight_info *pkts_info = vq->async->pkts_info;
1770
1771         *pkt_idx -= nr_err;
1772         /* calculate the sum of buffers and descs of DMA-error packets. */
1773         while (nr_err-- > 0) {
1774                 descs_err += pkts_info[slot_idx % vq->size].descs;
1775                 buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
1776                 slot_idx--;
1777         }
1778
1779         if (vq->last_avail_idx >= descs_err) {
1780                 vq->last_avail_idx -= descs_err;
1781         } else {
1782                 vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
1783                 vq->avail_wrap_counter ^= 1;
1784         }
1785
1786         vq->shadow_used_idx -= buffers_err;
1787 }
1788
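/*
 * Async enqueue for packed rings: same flow as the split variant above, with
 * per-packet descriptor and buffer counts recorded in pkts_info so that
 * dma_error_handler_packed() can unwind a partially accepted batch.
 */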
1789 static __rte_noinline uint32_t
1790 virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1791                 uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
1792                 int16_t dma_id, uint16_t vchan_id)
1793 {
1794         uint32_t pkt_idx = 0;
1795         uint32_t remained = count;
1796         uint16_t n_xfer;
1797         uint16_t num_buffers;
1798         uint16_t num_descs;
1799
1800         struct vhost_async *async = vq->async;
1801         struct async_inflight_info *pkts_info = async->pkts_info;
1802         uint32_t pkt_err = 0;
1803         uint16_t slot_idx = 0;
1804
1805         do {
1806                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1807
1808                 num_buffers = 0;
1809                 num_descs = 0;
1810                 if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
1811                                                 &num_descs, &num_buffers) < 0))
1812                         break;
1813
1814                 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
1815
1816                 pkts_info[slot_idx].descs = num_descs;
1817                 pkts_info[slot_idx].nr_buffers = num_buffers;
1818                 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1819
1820                 pkt_idx++;
1821                 remained--;
1822                 vq_inc_last_avail_packed(vq, num_descs);
1823         } while (pkt_idx < count);
1824
1825         if (unlikely(pkt_idx == 0))
1826                 return 0;
1827
1828         n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
1829                         async->iov_iter, pkt_idx);
1830
1831         async_iter_reset(async);
1832
1833         pkt_err = pkt_idx - n_xfer;
1834         if (unlikely(pkt_err)) {
1835                 VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
1836                                 dev->ifname, __func__, pkt_err, queue_id);
1837                 dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
1838         }
1839
1840         if (likely(vq->shadow_used_idx)) {
1841                 /* keep used descriptors. */
1842                 store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
1843                                         vq->size, 0, async->buffer_idx_packed,
1844                                         vq->shadow_used_idx);
1845
1846                 async->buffer_idx_packed += vq->shadow_used_idx;
1847                 if (async->buffer_idx_packed >= vq->size)
1848                         async->buffer_idx_packed -= vq->size;
1849
1850                 async->pkts_idx += pkt_idx;
1851                 if (async->pkts_idx >= vq->size)
1852                         async->pkts_idx -= vq->size;
1853
1854                 vq->shadow_used_idx = 0;
1855                 async->pkts_inflight_n += pkt_idx;
1856         }
1857
1858         return pkt_idx;
1859 }
1860
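/*
 * Write completed async descriptors back to the split used ring, chunking
 * the copies so that wrap-around of both the backup ring and the used ring
 * is handled.
 */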
1861 static __rte_always_inline void
1862 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
1863 {
1864         struct vhost_async *async = vq->async;
1865         uint16_t nr_left = n_descs;
1866         uint16_t nr_copy;
1867         uint16_t to, from;
1868
1869         do {
1870                 from = async->last_desc_idx_split & (vq->size - 1);
1871                 nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
1872                 to = vq->last_used_idx & (vq->size - 1);
1873
1874                 if (to + nr_copy <= vq->size) {
1875                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1876                                         nr_copy * sizeof(struct vring_used_elem));
1877                 } else {
1878                         uint16_t size = vq->size - to;
1879
1880                         rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1881                                         size * sizeof(struct vring_used_elem));
1882                         rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
1883                                         (nr_copy - size) * sizeof(struct vring_used_elem));
1884                 }
1885
1886                 async->last_desc_idx_split += nr_copy;
1887                 vq->last_used_idx += nr_copy;
1888                 nr_left -= nr_copy;
1889         } while (nr_left > 0);
1890 }
1891
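/*
 * Write completed async buffers back to the packed descriptor ring: id/len
 * fields are stored first, a release fence is issued, then the flags are
 * stored, with the head descriptor's flags written last so the guest never
 * observes a partially updated batch.
 */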
1892 static __rte_always_inline void
1893 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
1894                                 uint16_t n_buffers)
1895 {
1896         struct vhost_async *async = vq->async;
1897         uint16_t from = async->last_buffer_idx_packed;
1898         uint16_t used_idx = vq->last_used_idx;
1899         uint16_t head_idx = vq->last_used_idx;
1900         uint16_t head_flags = 0;
1901         uint16_t i;
1902
1903         /* Split loop in two to save memory barriers */
1904         for (i = 0; i < n_buffers; i++) {
1905                 vq->desc_packed[used_idx].id = async->buffers_packed[from].id;
1906                 vq->desc_packed[used_idx].len = async->buffers_packed[from].len;
1907
1908                 used_idx += async->buffers_packed[from].count;
1909                 if (used_idx >= vq->size)
1910                         used_idx -= vq->size;
1911
1912                 from++;
1913                 if (from >= vq->size)
1914                         from = 0;
1915         }
1916
1917         /* The ordering for storing desc flags needs to be enforced. */
1918         rte_atomic_thread_fence(__ATOMIC_RELEASE);
1919
1920         from = async->last_buffer_idx_packed;
1921
1922         for (i = 0; i < n_buffers; i++) {
1923                 uint16_t flags;
1924
1925                 if (async->buffers_packed[from].len)
1926                         flags = VRING_DESC_F_WRITE;
1927                 else
1928                         flags = 0;
1929
1930                 if (vq->used_wrap_counter) {
1931                         flags |= VRING_DESC_F_USED;
1932                         flags |= VRING_DESC_F_AVAIL;
1933                 } else {
1934                         flags &= ~VRING_DESC_F_USED;
1935                         flags &= ~VRING_DESC_F_AVAIL;
1936                 }
1937
1938                 if (i > 0) {
1939                         vq->desc_packed[vq->last_used_idx].flags = flags;
1940                 } else {
1941                         head_idx = vq->last_used_idx;
1942                         head_flags = flags;
1943                 }
1944
1945                 vq_inc_last_used_packed(vq, async->buffers_packed[from].count);
1946
1947                 from++;
1948                 if (from == vq->size)
1949                         from = 0;
1950         }
1951
1952         vq->desc_packed[head_idx].flags = head_flags;
1953         async->last_buffer_idx_packed = from;
1954 }
1955
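/*
 * Gather packets whose DMA copies have completed: poll the DMA vChannel,
 * walk the completion flags starting from the oldest in-flight packet,
 * hand the corresponding mbufs back to the caller, and write the used
 * descriptors back (or defer that when the ring is not accessible).
 */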
1956 static __rte_always_inline uint16_t
1957 vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
1958                 struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
1959                 uint16_t vchan_id)
1960 {
1961         struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
1962         struct vhost_async *async = vq->async;
1963         struct async_inflight_info *pkts_info = async->pkts_info;
1964         uint16_t nr_cpl_pkts = 0;
1965         uint16_t n_descs = 0, n_buffers = 0;
1966         uint16_t start_idx, from, i;
1967
1968         /* Check completed copies for the given DMA vChannel */
1969         vhost_async_dma_check_completed(dev, dma_id, vchan_id, VHOST_DMA_MAX_COPY_COMPLETE);
1970
1971         start_idx = async_get_first_inflight_pkt_idx(vq);
1972         /**
1973          * Calculate the number of packets whose copies have completed.
1974          * Note that there may be completed packets even if
1975          * no copies are reported done by the given DMA vChannel,
1976          * as it's possible that a virtqueue uses multiple DMA
1977          * vChannels.
1978          */
1979         from = start_idx;
1980         while (vq->async->pkts_cmpl_flag[from] && count--) {
1981                 vq->async->pkts_cmpl_flag[from] = false;
1982                 from++;
1983                 if (from >= vq->size)
1984                         from -= vq->size;
1985                 nr_cpl_pkts++;
1986         }
1987
1988         if (nr_cpl_pkts == 0)
1989                 return 0;
1990
1991         for (i = 0; i < nr_cpl_pkts; i++) {
1992                 from = (start_idx + i) % vq->size;
1993                 /* Only used with packed ring */
1994                 n_buffers += pkts_info[from].nr_buffers;
1995                 /* Only used with split ring */
1996                 n_descs += pkts_info[from].descs;
1997                 pkts[i] = pkts_info[from].mbuf;
1998         }
1999
2000         async->pkts_inflight_n -= nr_cpl_pkts;
2001
2002         if (likely(vq->enabled && vq->access_ok)) {
2003                 if (vq_is_packed(dev)) {
2004                         write_back_completed_descs_packed(vq, n_buffers);
2005                         vhost_vring_call_packed(dev, vq);
2006                 } else {
2007                         write_back_completed_descs_split(vq, n_descs);
2008                         __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
2009                         vhost_vring_call_split(dev, vq);
2010                 }
2011         } else {
2012                 if (vq_is_packed(dev)) {
2013                         async->last_buffer_idx_packed += n_buffers;
2014                         if (async->last_buffer_idx_packed >= vq->size)
2015                                 async->last_buffer_idx_packed -= vq->size;
2016                 } else {
2017                         async->last_desc_idx_split += n_descs;
2018                 }
2019         }
2020
2021         return nr_cpl_pkts;
2022 }
2023
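/*
 * Public wrapper around vhost_poll_enqueue_completed(): validates the queue
 * and DMA vChannel, and only proceeds if the virtqueue access lock can be
 * taken without blocking.
 */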
2024 uint16_t
2025 rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
2026                 struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
2027                 uint16_t vchan_id)
2028 {
2029         struct virtio_net *dev = get_device(vid);
2030         struct vhost_virtqueue *vq;
2031         uint16_t n_pkts_cpl = 0;
2032
2033         if (unlikely(!dev))
2034                 return 0;
2035
2036         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
2037         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2038                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
2039                         dev->ifname, __func__, queue_id);
2040                 return 0;
2041         }
2042
2043         if (unlikely(!dma_copy_track[dma_id].vchans ||
2044                                 !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
2045                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
2046                                dma_id, vchan_id);
2047                 return 0;
2048         }
2049
2050         vq = dev->virtqueue[queue_id];
2051
2052         if (!rte_spinlock_trylock(&vq->access_lock)) {
2053                 VHOST_LOG_DATA(DEBUG, "(%s) %s: virtqueue %u is busy.\n", dev->ifname, __func__,
2054                                 queue_id);
2055                 return 0;
2056         }
2057
2058         if (unlikely(!vq->async)) {
2059                 VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for virtqueue %d.\n",
2060                                 dev->ifname, __func__, queue_id);
2061                 goto out;
2062         }
2063
2064         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
2065
2066 out:
2067         rte_spinlock_unlock(&vq->access_lock);
2068
2069         return n_pkts_cpl;
2070 }
2071
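/*
 * Same as rte_vhost_poll_enqueue_completed(), but without taking the
 * virtqueue access lock; the caller is responsible for serialization.
 */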
2072 uint16_t
2073 rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
2074                 struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
2075                 uint16_t vchan_id)
2076 {
2077         struct virtio_net *dev = get_device(vid);
2078         struct vhost_virtqueue *vq;
2079         uint16_t n_pkts_cpl = 0;
2080
2081         if (!dev)
2082                 return 0;
2083
2084         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
2085         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2086                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
2087                         dev->ifname, __func__, queue_id);
2088                 return 0;
2089         }
2090
2091         vq = dev->virtqueue[queue_id];
2092
2093         if (unlikely(!vq->async)) {
2094                 VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
2095                         dev->ifname, __func__, queue_id);
2096                 return 0;
2097         }
2098
2099         if (unlikely(!dma_copy_track[dma_id].vchans ||
2100                                 !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
2101                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
2102                                 dma_id, vchan_id);
2103                 return 0;
2104         }
2105
2106         n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
2107
2108         return n_pkts_cpl;
2109 }
2110
2111 static __rte_always_inline uint32_t
2112 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
2113         struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
2114 {
2115         struct vhost_virtqueue *vq;
2116         uint32_t nb_tx = 0;
2117
2118         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
2119         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2120                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
2121                         dev->ifname, __func__, queue_id);
2122                 return 0;
2123         }
2124
2125         if (unlikely(!dma_copy_track[dma_id].vchans ||
2126                                 !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
2127                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
2128                                dma_id, vchan_id);
2129                 return 0;
2130         }
2131
2132         vq = dev->virtqueue[queue_id];
2133
2134         rte_spinlock_lock(&vq->access_lock);
2135
2136         if (unlikely(!vq->enabled || !vq->async))
2137                 goto out_access_unlock;
2138
2139         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2140                 vhost_user_iotlb_rd_lock(vq);
2141
2142         if (unlikely(!vq->access_ok))
2143                 if (unlikely(vring_translate(dev, vq) < 0))
2144                         goto out;
2145
2146         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
2147         if (count == 0)
2148                 goto out;
2149
2150         if (vq_is_packed(dev))
2151                 nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
2152                                 pkts, count, dma_id, vchan_id);
2153         else
2154                 nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
2155                                 pkts, count, dma_id, vchan_id);
2156
2157 out:
2158         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2159                 vhost_user_iotlb_rd_unlock(vq);
2160
2161 out_access_unlock:
2162         rte_spinlock_unlock(&vq->access_lock);
2163
2164         return nb_tx;
2165 }
2166
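/*
 * Illustrative usage sketch (not part of the library) of the async enqueue
 * API: submit a burst, then later reclaim and free the mbufs whose DMA
 * copies have completed. vid, rx_vq_id, dma_id, vchan, BURST_SZ and the
 * arrays are placeholders for this example.
 *
 *	uint16_t n_enq = rte_vhost_submit_enqueue_burst(vid, rx_vq_id, pkts,
 *			nb_rx, dma_id, vchan);
 *	...
 *	struct rte_mbuf *done[BURST_SZ];
 *	uint16_t n_done = rte_vhost_poll_enqueue_completed(vid, rx_vq_id,
 *			done, BURST_SZ, dma_id, vchan);
 *	rte_pktmbuf_free_bulk(done, n_done);
 */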
2167 uint16_t
2168 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
2169                 struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
2170                 uint16_t vchan_id)
2171 {
2172         struct virtio_net *dev = get_device(vid);
2173
2174         if (!dev)
2175                 return 0;
2176
2177         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2178                 VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
2179                         dev->ifname, __func__);
2180                 return 0;
2181         }
2182
2183         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, dma_id, vchan_id);
2184 }
2185
2186 static inline bool
2187 virtio_net_with_host_offload(struct virtio_net *dev)
2188 {
2189         if (dev->features &
2190                         ((1ULL << VIRTIO_NET_F_CSUM) |
2191                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
2192                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2193                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
2194                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
2195                 return true;
2196
2197         return false;
2198 }
2199
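/*
 * Parse the Ethernet/VLAN, L3 and L4 headers of an mbuf, filling in
 * m->l2_len, m->l3_len, the RTE_MBUF_F_TX_IPV4/IPV6 flags and *l4_proto.
 * Returns -EINVAL (and clears those fields) when a header does not fit in
 * the first segment or the protocol is unsupported.
 */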
2200 static int
2201 parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
2202 {
2203         struct rte_ipv4_hdr *ipv4_hdr;
2204         struct rte_ipv6_hdr *ipv6_hdr;
2205         struct rte_ether_hdr *eth_hdr;
2206         uint16_t ethertype;
2207         uint16_t data_len = rte_pktmbuf_data_len(m);
2208
2209         if (data_len < sizeof(struct rte_ether_hdr))
2210                 return -EINVAL;
2211
2212         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
2213
2214         m->l2_len = sizeof(struct rte_ether_hdr);
2215         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
2216
2217         if (ethertype == RTE_ETHER_TYPE_VLAN) {
2218                 if (data_len < sizeof(struct rte_ether_hdr) +
2219                                 sizeof(struct rte_vlan_hdr))
2220                         goto error;
2221
2222                 struct rte_vlan_hdr *vlan_hdr =
2223                         (struct rte_vlan_hdr *)(eth_hdr + 1);
2224
2225                 m->l2_len += sizeof(struct rte_vlan_hdr);
2226                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
2227         }
2228
2229         switch (ethertype) {
2230         case RTE_ETHER_TYPE_IPV4:
2231                 if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
2232                         goto error;
2233                 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2234                                 m->l2_len);
2235                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
2236                 if (data_len < m->l2_len + m->l3_len)
2237                         goto error;
2238                 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
2239                 *l4_proto = ipv4_hdr->next_proto_id;
2240                 break;
2241         case RTE_ETHER_TYPE_IPV6:
2242                 if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
2243                         goto error;
2244                 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
2245                                 m->l2_len);
2246                 m->l3_len = sizeof(struct rte_ipv6_hdr);
2247                 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
2248                 *l4_proto = ipv6_hdr->proto;
2249                 break;
2250         default:
2251                 /* a valid L3 header is needed for further L4 parsing */
2252                 goto error;
2253         }
2254
2255         /* both CSUM and GSO need a valid L4 header */
2256         switch (*l4_proto) {
2257         case IPPROTO_TCP:
2258                 if (data_len < m->l2_len + m->l3_len +
2259                                 sizeof(struct rte_tcp_hdr))
2260                         goto error;
2261                 break;
2262         case IPPROTO_UDP:
2263                 if (data_len < m->l2_len + m->l3_len +
2264                                 sizeof(struct rte_udp_hdr))
2265                         goto error;
2266                 break;
2267         case IPPROTO_SCTP:
2268                 if (data_len < m->l2_len + m->l3_len +
2269                                 sizeof(struct rte_sctp_hdr))
2270                         goto error;
2271                 break;
2272         default:
2273                 goto error;
2274         }
2275
2276         return 0;
2277
2278 error:
2279         m->l2_len = 0;
2280         m->l3_len = 0;
2281         m->ol_flags = 0;
2282         return -EINVAL;
2283 }
2284
2285 static __rte_always_inline void
2286 vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
2287                 struct rte_mbuf *m)
2288 {
2289         uint8_t l4_proto = 0;
2290         struct rte_tcp_hdr *tcp_hdr = NULL;
2291         uint16_t tcp_len;
2292         uint16_t data_len = rte_pktmbuf_data_len(m);
2293
2294         if (parse_headers(m, &l4_proto) < 0)
2295                 return;
2296
2297         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2298                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
2299                         switch (hdr->csum_offset) {
2300                         case (offsetof(struct rte_tcp_hdr, cksum)):
2301                                 if (l4_proto != IPPROTO_TCP)
2302                                         goto error;
2303                                 m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
2304                                 break;
2305                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
2306                                 if (l4_proto != IPPROTO_UDP)
2307                                         goto error;
2308                                 m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
2309                                 break;
2310                         case (offsetof(struct rte_sctp_hdr, cksum)):
2311                                 if (l4_proto != IPPROTO_SCTP)
2312                                         goto error;
2313                                 m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
2314                                 break;
2315                         default:
2316                                 goto error;
2317                         }
2318                 } else {
2319                         goto error;
2320                 }
2321         }
2322
2323         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2324                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2325                 case VIRTIO_NET_HDR_GSO_TCPV4:
2326                 case VIRTIO_NET_HDR_GSO_TCPV6:
2327                         if (l4_proto != IPPROTO_TCP)
2328                                 goto error;
2329                         tcp_hdr = rte_pktmbuf_mtod_offset(m,
2330                                         struct rte_tcp_hdr *,
2331                                         m->l2_len + m->l3_len);
2332                         tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
2333                         if (data_len < m->l2_len + m->l3_len + tcp_len)
2334                                 goto error;
2335                         m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
2336                         m->tso_segsz = hdr->gso_size;
2337                         m->l4_len = tcp_len;
2338                         break;
2339                 case VIRTIO_NET_HDR_GSO_UDP:
2340                         if (l4_proto != IPPROTO_UDP)
2341                                 goto error;
2342                         m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
2343                         m->tso_segsz = hdr->gso_size;
2344                         m->l4_len = sizeof(struct rte_udp_hdr);
2345                         break;
2346                 default:
2347                         VHOST_LOG_DATA(WARNING, "(%s) unsupported gso type %u.\n",
2348                                         dev->ifname, hdr->gso_type);
2349                         goto error;
2350                 }
2351         }
2352         return;
2353
2354 error:
2355         m->l2_len = 0;
2356         m->l3_len = 0;
2357         m->ol_flags = 0;
2358 }
2359
2360 static __rte_always_inline void
2361 vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
2362                 struct rte_mbuf *m, bool legacy_ol_flags)
2363 {
2364         struct rte_net_hdr_lens hdr_lens;
2365         int l4_supported = 0;
2366         uint32_t ptype;
2367
2368         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
2369                 return;
2370
2371         if (legacy_ol_flags) {
2372                 vhost_dequeue_offload_legacy(dev, hdr, m);
2373                 return;
2374         }
2375
2376         m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
2377
2378         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
2379         m->packet_type = ptype;
2380         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
2381             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
2382             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
2383                 l4_supported = 1;
2384
2385         /* According to Virtio 1.1 spec, the device only needs to look at
2386          * VIRTIO_NET_HDR_F_NEEDS_CSUM in the packet transmission path.
2387          * This differs from the incoming packet processing path, where the
2388          * driver could rely on VIRTIO_NET_HDR_F_DATA_VALID flag set by the
2389          * device.
2390          *
2391          * 5.1.6.2.1 Driver Requirements: Packet Transmission
2392          * The driver MUST NOT set the VIRTIO_NET_HDR_F_DATA_VALID and
2393          * VIRTIO_NET_HDR_F_RSC_INFO bits in flags.
2394          *
2395          * 5.1.6.2.2 Device Requirements: Packet Transmission
2396          * The device MUST ignore flag bits that it does not recognize.
2397          */
2398         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2399                 uint32_t hdrlen;
2400
2401                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
2402                 if (hdr->csum_start <= hdrlen && l4_supported != 0) {
2403                         m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
2404                 } else {
2405                         /* Unknown proto or tunnel, do sw cksum. We can assume
2406                          * the cksum field is in the first segment since the
2407                          * buffers we provided to the host are large enough.
2408                          * In case of SCTP, this will be wrong since it's a CRC
2409                          * but there's nothing we can do.
2410                          */
2411                         uint16_t csum = 0, off;
2412
2413                         if (rte_raw_cksum_mbuf(m, hdr->csum_start,
2414                                         rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0)
2415                                 return;
2416                         if (likely(csum != 0xffff))
2417                                 csum = ~csum;
2418                         off = hdr->csum_offset + hdr->csum_start;
2419                         if (rte_pktmbuf_data_len(m) >= off + 1)
2420                                 *rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
2421                 }
2422         }
2423
2424         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2425                 if (hdr->gso_size == 0)
2426                         return;
2427
2428                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2429                 case VIRTIO_NET_HDR_GSO_TCPV4:
2430                 case VIRTIO_NET_HDR_GSO_TCPV6:
2431                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
2432                                 break;
2433                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2434                         m->tso_segsz = hdr->gso_size;
2435                         break;
2436                 case VIRTIO_NET_HDR_GSO_UDP:
2437                         if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
2438                                 break;
2439                         m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2440                         m->tso_segsz = hdr->gso_size;
2441                         break;
2442                 default:
2443                         break;
2444                 }
2445         }
2446 }
2447
2448 static __rte_noinline void
2449 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
2450                 struct buf_vector *buf_vec)
2451 {
2452         uint64_t len;
2453         uint64_t remain = sizeof(struct virtio_net_hdr);
2454         uint64_t src;
2455         uint64_t dst = (uint64_t)(uintptr_t)hdr;
2456
2457         while (remain) {
2458                 len = RTE_MIN(remain, buf_vec->buf_len);
2459                 src = buf_vec->buf_addr;
2460                 rte_memcpy((void *)(uintptr_t)dst,
2461                                 (void *)(uintptr_t)src, len);
2462
2463                 remain -= len;
2464                 dst += len;
2465                 buf_vec++;
2466         }
2467 }
2468
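/*
 * Copy one packet from a descriptor chain into an mbuf, chaining additional
 * mbufs from mbuf_pool when the packet does not fit, then apply the
 * virtio-net header offloads to the resulting mbuf.
 */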
2469 static __rte_always_inline int
2470 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
2471                   struct buf_vector *buf_vec, uint16_t nr_vec,
2472                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
2473                   bool legacy_ol_flags)
2474 {
2475         uint32_t buf_avail, buf_offset;
2476         uint64_t buf_addr, buf_len;
2477         uint32_t mbuf_avail, mbuf_offset;
2478         uint32_t cpy_len;
2479         struct rte_mbuf *cur = m, *prev = m;
2480         struct virtio_net_hdr tmp_hdr;
2481         struct virtio_net_hdr *hdr = NULL;
2482         /* A counter to avoid a dead loop on the desc chain */
2483         uint16_t vec_idx = 0;
2484         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
2485         int error = 0;
2486
2487         buf_addr = buf_vec[vec_idx].buf_addr;
2488         buf_len = buf_vec[vec_idx].buf_len;
2489
2490         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
2491                 error = -1;
2492                 goto out;
2493         }
2494
2495         if (virtio_net_with_host_offload(dev)) {
2496                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
2497                         /*
2498                          * No luck, the virtio-net header doesn't fit
2499                          * in a contiguous virtual area.
2500                          */
2501                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
2502                         hdr = &tmp_hdr;
2503                 } else {
2504                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
2505                 }
2506         }
2507
2508         /*
2509          * A virtio driver normally uses at least 2 desc buffers
2510          * for Tx: the first for storing the header, and others
2511          * for storing the data.
2512          */
2513         if (unlikely(buf_len < dev->vhost_hlen)) {
2514                 buf_offset = dev->vhost_hlen - buf_len;
2515                 vec_idx++;
2516                 buf_addr = buf_vec[vec_idx].buf_addr;
2517                 buf_len = buf_vec[vec_idx].buf_len;
2518                 buf_avail  = buf_len - buf_offset;
2519         } else if (buf_len == dev->vhost_hlen) {
2520                 if (unlikely(++vec_idx >= nr_vec))
2521                         goto out;
2522                 buf_addr = buf_vec[vec_idx].buf_addr;
2523                 buf_len = buf_vec[vec_idx].buf_len;
2524
2525                 buf_offset = 0;
2526                 buf_avail = buf_len;
2527         } else {
2528                 buf_offset = dev->vhost_hlen;
2529                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
2530         }
2531
2532         PRINT_PACKET(dev,
2533                         (uintptr_t)(buf_addr + buf_offset),
2534                         (uint32_t)buf_avail, 0);
2535
2536         mbuf_offset = 0;
2537         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
2538         while (1) {
2539                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
2540
2541                 if (likely(cpy_len > MAX_BATCH_LEN ||
2542                                         vq->batch_copy_nb_elems >= vq->size ||
2543                                         (hdr && cur == m))) {
2544                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2545                                                 mbuf_offset),
2546                                         (void *)((uintptr_t)(buf_addr +
2547                                                         buf_offset)), cpy_len);
2548                 } else {
2549                         batch_copy[vq->batch_copy_nb_elems].dst =
2550                                 rte_pktmbuf_mtod_offset(cur, void *,
2551                                                 mbuf_offset);
2552                         batch_copy[vq->batch_copy_nb_elems].src =
2553                                 (void *)((uintptr_t)(buf_addr + buf_offset));
2554                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
2555                         vq->batch_copy_nb_elems++;
2556                 }
2557
2558                 mbuf_avail  -= cpy_len;
2559                 mbuf_offset += cpy_len;
2560                 buf_avail -= cpy_len;
2561                 buf_offset += cpy_len;
2562
2563                 /* This buffer has reached its end, get the next one */
2564                 if (buf_avail == 0) {
2565                         if (++vec_idx >= nr_vec)
2566                                 break;
2567
2568                         buf_addr = buf_vec[vec_idx].buf_addr;
2569                         buf_len = buf_vec[vec_idx].buf_len;
2570
2571                         buf_offset = 0;
2572                         buf_avail  = buf_len;
2573
2574                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2575                                         (uint32_t)buf_avail, 0);
2576                 }
2577
2578                 /*
2579                  * This mbuf has reached its end, allocate a new one
2580                  * to hold more data.
2581                  */
2582                 if (mbuf_avail == 0) {
2583                         cur = rte_pktmbuf_alloc(mbuf_pool);
2584                         if (unlikely(cur == NULL)) {
2585                                 VHOST_LOG_DATA(ERR, "(%s) failed to allocate memory for mbuf.\n",
2586                                                 dev->ifname);
2587                                 error = -1;
2588                                 goto out;
2589                         }
2590
2591                         prev->next = cur;
2592                         prev->data_len = mbuf_offset;
2593                         m->nb_segs += 1;
2594                         m->pkt_len += mbuf_offset;
2595                         prev = cur;
2596
2597                         mbuf_offset = 0;
2598                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2599                 }
2600         }
2601
2602         prev->data_len = mbuf_offset;
2603         m->pkt_len    += mbuf_offset;
2604
2605         if (hdr)
2606                 vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
2607
2608 out:
2609
2610         return error;
2611 }
2612
2613 static void
2614 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2615 {
2616         rte_free(opaque);
2617 }
2618
2619 static int
2620 virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t size)
2621 {
2622         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2623         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2624         uint16_t buf_len;
2625         rte_iova_t iova;
2626         void *buf;
2627
2628         total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2629         total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2630
2631         if (unlikely(total_len > UINT16_MAX))
2632                 return -ENOSPC;
2633
2634         buf_len = total_len;
2635         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2636         if (unlikely(buf == NULL))
2637                 return -ENOMEM;
2638
2639         /* Initialize shinfo */
2640         shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2641                                                 virtio_dev_extbuf_free, buf);
2642         if (unlikely(shinfo == NULL)) {
2643                 rte_free(buf);
2644                 VHOST_LOG_DATA(ERR, "(%s) failed to init shinfo\n", dev->ifname);
2645                 return -1;
2646         }
2647
2648         iova = rte_malloc_virt2iova(buf);
2649         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2650         rte_pktmbuf_reset_headroom(pkt);
2651
2652         return 0;
2653 }
2654
2655 /*
2656  * Prepare a pktmbuf able to hold a packet of the given size.
2657  */
2658 static __rte_always_inline int
2659 virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
2660                          uint32_t data_len)
2661 {
2662         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2663                 return 0;
2664
2665         /* attach an external buffer if supported */
2666         if (dev->extbuf && !virtio_dev_extbuf_alloc(dev, pkt, data_len))
2667                 return 0;
2668
2669         /* check if chained buffers are allowed */
2670         if (!dev->linearbuf)
2671                 return 0;
2672
2673         return -1;
2674 }
2675
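/*
 * Dequeue up to 'count' packets from a split ring: mbufs are allocated in
 * bulk up front, each descriptor chain is copied into its mbuf, and the
 * consumed descriptors are returned through the shadow used ring.
 */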
2676 __rte_always_inline
2677 static uint16_t
2678 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2679         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
2680         bool legacy_ol_flags)
2681 {
2682         uint16_t i;
2683         uint16_t free_entries;
2684         uint16_t dropped = 0;
2685         static bool allocerr_warned;
2686
2687         /*
2688          * The ordering between avail index and
2689          * desc reads needs to be enforced.
2690          */
2691         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2692                         vq->last_avail_idx;
2693         if (free_entries == 0)
2694                 return 0;
2695
2696         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2697
2698         VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
2699
2700         count = RTE_MIN(count, MAX_PKT_BURST);
2701         count = RTE_MIN(count, free_entries);
2702         VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
2703                         dev->ifname, count);
2704
2705         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2706                 return 0;
2707
2708         for (i = 0; i < count; i++) {
2709                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2710                 uint16_t head_idx;
2711                 uint32_t buf_len;
2712                 uint16_t nr_vec = 0;
2713                 int err;
2714
2715                 if (unlikely(fill_vec_buf_split(dev, vq,
2716                                                 vq->last_avail_idx + i,
2717                                                 &nr_vec, buf_vec,
2718                                                 &head_idx, &buf_len,
2719                                                 VHOST_ACCESS_RO) < 0))
2720                         break;
2721
2722                 update_shadow_used_ring_split(vq, head_idx, 0);
2723
2724                 err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len);
2725                 if (unlikely(err)) {
2726                         /*
2727                          * mbuf allocation fails for jumbo packets when external
2728                          * buffer allocation is not allowed and linear buffer
2729                          * is required. Drop this packet.
2730                          */
2731                         if (!allocerr_warned) {
2732                                 VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
2733                                         dev->ifname, buf_len, mbuf_pool->name);
2734                                 allocerr_warned = true;
2735                         }
2736                         dropped += 1;
2737                         i++;
2738                         break;
2739                 }
2740
2741                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2742                                 mbuf_pool, legacy_ol_flags);
2743                 if (unlikely(err)) {
2744                         if (!allocerr_warned) {
2745                                 VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
2746                                         dev->ifname);
2747                                 allocerr_warned = true;
2748                         }
2749                         dropped += 1;
2750                         i++;
2751                         break;
2752                 }
2753         }
2754
2755         if (dropped)
2756                 rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
2757
2758         vq->last_avail_idx += i;
2759
2760         do_data_copy_dequeue(vq);
2761         if (unlikely(i < count))
2762                 vq->shadow_used_idx = i;
2763         if (likely(vq->shadow_used_idx)) {
2764                 flush_shadow_used_ring_split(dev, vq);
2765                 vhost_vring_call_split(dev, vq);
2766         }
2767
2768         return (i - dropped);
2769 }
2770
2771 __rte_noinline
2772 static uint16_t
2773 virtio_dev_tx_split_legacy(struct virtio_net *dev,
2774         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2775         struct rte_mbuf **pkts, uint16_t count)
2776 {
2777         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
2778 }
2779
2780 __rte_noinline
2781 static uint16_t
2782 virtio_dev_tx_split_compliant(struct virtio_net *dev,
2783         struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2784         struct rte_mbuf **pkts, uint16_t count)
2785 {
2786         return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
2787 }
2788
2789 static __rte_always_inline int
2790 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2791                                  struct vhost_virtqueue *vq,
2792                                  struct rte_mbuf **pkts,
2793                                  uint16_t avail_idx,
2794                                  uintptr_t *desc_addrs,
2795                                  uint16_t *ids)
2796 {
2797         bool wrap = vq->avail_wrap_counter;
2798         struct vring_packed_desc *descs = vq->desc_packed;
2799         uint64_t lens[PACKED_BATCH_SIZE];
2800         uint64_t buf_lens[PACKED_BATCH_SIZE];
2801         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2802         uint16_t flags, i;
2803
2804         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2805                 return -1;
2806         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2807                 return -1;
2808
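        /*
         * A packed descriptor is available when its AVAIL flag matches the
         * current wrap counter and its USED flag does not; chained or
         * indirect descriptors must take the single-dequeue path.
         */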
2809         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2810                 flags = descs[avail_idx + i].flags;
2811                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2812                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2813                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2814                         return -1;
2815         }
2816
2817         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2818
2819         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2820                 lens[i] = descs[avail_idx + i].len;
2821
2822         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2823                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2824                                                   descs[avail_idx + i].addr,
2825                                                   &lens[i], VHOST_ACCESS_RW);
2826         }
2827
2828         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2829                 if (unlikely(!desc_addrs[i]))
2830                         return -1;
2831                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2832                         return -1;
2833         }
2834
2835         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2836                 if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
2837                         goto err;
2838         }
2839
2840         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2841                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2842
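        /*
         * The batched path copies into a single mbuf, so the payload
         * (excluding the virtio-net header) must fit in its data room.
         */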
2843         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2844                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2845                         goto err;
2846         }
2847
2848         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2849                 pkts[i]->pkt_len = lens[i] - buf_offset;
2850                 pkts[i]->data_len = pkts[i]->pkt_len;
2851                 ids[i] = descs[avail_idx + i].id;
2852         }
2853
2854         return 0;
2855
2856 err:
2857         return -1;
2858 }
2859
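/*
 * Fast-path dequeue of PACKED_BATCH_SIZE packets at once: copy the payloads
 * (skipping the virtio-net header via buf_offset), apply offload metadata
 * when host offloads are negotiated, record the used descriptors in the
 * shadow ring and advance last_avail_idx. Returns -1 when the batch cannot
 * be consumed as a whole.
 */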
2860 static __rte_always_inline int
2861 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2862                            struct vhost_virtqueue *vq,
2863                            struct rte_mbuf **pkts,
2864                            bool legacy_ol_flags)
2865 {
2866         uint16_t avail_idx = vq->last_avail_idx;
2867         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2868         struct virtio_net_hdr *hdr;
2869         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2870         uint16_t ids[PACKED_BATCH_SIZE];
2871         uint16_t i;
2872
2873         if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
2874                                              desc_addrs, ids))
2875                 return -1;
2876
2877         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2878                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2879
2880         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2881                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2882                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2883                            pkts[i]->pkt_len);
2884
2885         if (virtio_net_with_host_offload(dev)) {
2886                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2887                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2888                         vhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);
2889                 }
2890         }
2891
2892         if (virtio_net_is_inorder(dev))
2893                 vhost_shadow_dequeue_batch_packed_inorder(vq,
2894                         ids[PACKED_BATCH_SIZE - 1]);
2895         else
2896                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2897
2898         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2899
2900         return 0;
2901 }
2902
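/*
 * Slow-path dequeue of a single packet from the packed ring: gather the
 * (possibly chained) descriptors into buf_vec, size the mbuf accordingly
 * and copy the data. buf_id and desc_count report which descriptors were
 * consumed so the caller can return them to the guest.
 */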
2903 static __rte_always_inline int
2904 vhost_dequeue_single_packed(struct virtio_net *dev,
2905                             struct vhost_virtqueue *vq,
2906                             struct rte_mempool *mbuf_pool,
2907                             struct rte_mbuf *pkts,
2908                             uint16_t *buf_id,
2909                             uint16_t *desc_count,
2910                             bool legacy_ol_flags)
2911 {
2912         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2913         uint32_t buf_len;
2914         uint16_t nr_vec = 0;
2915         int err;
2916         static bool allocerr_warned;
2917
2918         if (unlikely(fill_vec_buf_packed(dev, vq,
2919                                          vq->last_avail_idx, desc_count,
2920                                          buf_vec, &nr_vec,
2921                                          buf_id, &buf_len,
2922                                          VHOST_ACCESS_RO) < 0))
2923                 return -1;
2924
2925         if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
2926                 if (!allocerr_warned) {
2927                         VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
2928                                 dev->ifname, buf_len, mbuf_pool->name);
2929                         allocerr_warned = true;
2930                 }
2931                 return -1;
2932         }
2933
2934         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
2935                                 mbuf_pool, legacy_ol_flags);
2936         if (unlikely(err)) {
2937                 if (!allocerr_warned) {
2938                         VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
2939                                 dev->ifname);
2940                         allocerr_warned = true;
2941                 }
2942                 return -1;
2943         }
2944
2945         return 0;
2946 }
2947
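/*
 * Dequeue one packet via the slow path; whenever descriptors were consumed
 * (desc_count > 0), they are recorded in the shadow ring and last_avail_idx
 * is advanced even if the copy failed, so the descriptors are still
 * returned to the guest.
 */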
2948 static __rte_always_inline int
2949 virtio_dev_tx_single_packed(struct virtio_net *dev,
2950                             struct vhost_virtqueue *vq,
2951                             struct rte_mempool *mbuf_pool,
2952                             struct rte_mbuf *pkts,
2953                             bool legacy_ol_flags)
2954 {
2955
2956         uint16_t buf_id, desc_count = 0;
2957         int ret;
2958
2959         ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2960                                         &desc_count, legacy_ol_flags);
2961
2962         if (likely(desc_count > 0)) {
2963                 if (virtio_net_is_inorder(dev))
2964                         vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
2965                                                                    desc_count);
2966                 else
2967                         vhost_shadow_dequeue_single_packed(vq, buf_id,
2968                                         desc_count);
2969
2970                 vq_inc_last_avail_packed(vq, desc_count);
2971         }
2972
2973         return ret;
2974 }
2975
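/*
 * Main packed-ring dequeue loop: mbufs are allocated up front in one bulk
 * call, batches of PACKED_BATCH_SIZE descriptors are tried first with the
 * single-descriptor path as fallback, and any mbufs left unused at the end
 * are freed back to the pool before the shadow ring is flushed.
 */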
2976 __rte_always_inline
2977 static uint16_t
2978 virtio_dev_tx_packed(struct virtio_net *dev,
2979                      struct vhost_virtqueue *__rte_restrict vq,
2980                      struct rte_mempool *mbuf_pool,
2981                      struct rte_mbuf **__rte_restrict pkts,
2982                      uint32_t count,
2983                      bool legacy_ol_flags)
2984 {
2985         uint32_t pkt_idx = 0;
2986
2987         if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2988                 return 0;
2989
2990         do {
2991                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2992
2993                 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
2994                         if (!virtio_dev_tx_batch_packed(dev, vq,
2995                                                         &pkts[pkt_idx],
2996                                                         legacy_ol_flags)) {
2997                                 pkt_idx += PACKED_BATCH_SIZE;
2998                                 continue;
2999                         }
3000                 }
3001
3002                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
3003                                                 pkts[pkt_idx],
3004                                                 legacy_ol_flags))
3005                         break;
3006                 pkt_idx++;
3007         } while (pkt_idx < count);
3008
3009         if (pkt_idx != count)
3010                 rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
3011
3012         if (vq->shadow_used_idx) {
3013                 do_data_copy_dequeue(vq);
3014
3015                 vhost_flush_dequeue_shadow_packed(dev, vq);
3016                 vhost_vring_call_packed(dev, vq);
3017         }
3018
3019         return pkt_idx;
3020 }
3021
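/*
 * Packed-ring counterparts of the split legacy/compliant wrappers above,
 * again letting the compiler specialize on the offload-flag convention.
 */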
3022 __rte_noinline
3023 static uint16_t
3024 virtio_dev_tx_packed_legacy(struct virtio_net *dev,
3025         struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3026         struct rte_mbuf **__rte_restrict pkts, uint32_t count)
3027 {
3028         return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
3029 }
3030
3031 __rte_noinline
3032 static uint16_t
3033 virtio_dev_tx_packed_compliant(struct virtio_net *dev,
3034         struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3035         struct rte_mbuf **__rte_restrict pkts, uint32_t count)
3036 {
3037         return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
3038 }
3039
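/*
 * Illustrative usage sketch (assumption, not part of this file; "vid", "mp"
 * and the burst size are example names): an application typically polls a
 * guest Tx virtqueue (an odd index, e.g. VIRTIO_TXQ for queue pair 0) and
 * either forwards or frees the dequeued mbufs:
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t nb, i;
 *
 *   nb = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mp, pkts, 32);
 *   for (i = 0; i < nb; i++)
 *           rte_pktmbuf_free(pkts[i]); /- or hand off to rte_eth_tx_burst() -/
 */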
3040 uint16_t
3041 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
3042         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
3043 {
3044         struct virtio_net *dev;
3045         struct rte_mbuf *rarp_mbuf = NULL;
3046         struct vhost_virtqueue *vq;
3047         int16_t success = 1;
3048
3049         dev = get_device(vid);
3050         if (!dev)
3051                 return 0;
3052
3053         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
3054                 VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
3055                                 dev->ifname, __func__);
3056                 return 0;
3057         }
3058
3059         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
3060                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
3061                                 dev->ifname, __func__, queue_id);
3062                 return 0;
3063         }
3064
3065         vq = dev->virtqueue[queue_id];
3066
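        /*
         * Use a trylock so the polling data path never blocks when the
         * control path holds the access lock (e.g. during a vring state
         * change); the caller simply retries on the next poll.
         */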
3067         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
3068                 return 0;
3069
3070         if (unlikely(!vq->enabled)) {
3071                 count = 0;
3072                 goto out_access_unlock;
3073         }
3074
3075         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3076                 vhost_user_iotlb_rd_lock(vq);
3077
3078         if (unlikely(!vq->access_ok))
3079                 if (unlikely(vring_translate(dev, vq) < 0)) {
3080                         count = 0;
3081                         goto out;
3082                 }
3083
3084         /*
3085          * Construct a RARP broadcast packet and inject it into the "pkts"
3086          * array, so that it looks like the guest actually sent such a packet.
3087          *
3088          * Check user_send_rarp() for more information.
3089          *
3090          * broadcast_rarp shares a cacheline in the virtio_net structure
3091          * with some fields that are accessed during enqueue, and
3092          * __atomic_compare_exchange_n causes a write whenever the compare
3093          * and exchange is performed. This could result in false sharing
3094          * between enqueue and dequeue.
3095          *
3096          * Prevent unnecessary false sharing by reading broadcast_rarp first
3097          * and only performing compare and exchange if the read indicates it
3098          * is likely to be set.
3099          */
3100         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
3101                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
3102                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
3103
3104                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
3105                 if (rarp_mbuf == NULL) {
3106                         VHOST_LOG_DATA(ERR, "(%s) failed to make RARP packet.\n", dev->ifname);
3107                         count = 0;
3108                         goto out;
3109                 }
3110                 /*
3111                  * Inject it at the head of the "pkts" array, so that the
3112                  * switch's MAC learning table gets updated first.
3113                  */
3114                 pkts[0] = rarp_mbuf;
3115                 pkts++;
3116                 count -= 1;
3117         }
3118
3119         if (vq_is_packed(dev)) {
3120                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3121                         count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
3122                 else
3123                         count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
3124         } else {
3125                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3126                         count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
3127                 else
3128                         count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
3129         }
3130
3131 out:
3132         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3133                 vhost_user_iotlb_rd_unlock(vq);
3134
3135 out_access_unlock:
3136         rte_spinlock_unlock(&vq->access_lock);
3137
3138         if (unlikely(rarp_mbuf != NULL))
3139                 count += 1;
3140
3141         return count;
3142 }